From 2bdba30963e550728ba2903d57daa1e666a29d71 Mon Sep 17 00:00:00 2001
From: Trivikram Kamat <16024985+trivikr@users.noreply.github.com>
Date: Wed, 1 Dec 2021 14:00:43 -0800
Subject: [PATCH] feat(clients): update clients as of 2021/11/30 (#3077)

---
.../src/models/models_0.ts | 27 +- .../src/protocols/Aws_restJson1.ts | 2 + clients/client-backup-gateway/.gitignore | 9 + clients/client-backup-gateway/LICENSE | 201 + clients/client-backup-gateway/README.md | 212 + clients/client-backup-gateway/jest.config.js | 4 + clients/client-backup-gateway/package.json | 94 + .../src/BackupGateway.ts | 603 ++ .../src/BackupGatewayClient.ts | 326 + .../AssociateGatewayToServerCommand.ts | 96 + .../src/commands/CreateGatewayCommand.ts | 96 + .../src/commands/DeleteGatewayCommand.ts | 95 + .../src/commands/DeleteHypervisorCommand.ts | 95 + .../DisassociateGatewayFromServerCommand.ts | 101 + .../ImportHypervisorConfigurationCommand.ts | 100 + .../src/commands/ListGatewaysCommand.ts | 95 + .../src/commands/ListHypervisorsCommand.ts | 95 + .../commands/ListTagsForResourceCommand.ts | 96 + .../commands/ListVirtualMachinesCommand.ts | 95 + .../PutMaintenanceStartTimeCommand.ts | 95 + .../src/commands/TagResourceCommand.ts | 95 + .../TestHypervisorConfigurationCommand.ts | 99 + .../src/commands/UntagResourceCommand.ts | 95 + .../UpdateGatewayInformationCommand.ts | 96 + .../src/commands/UpdateHypervisorCommand.ts | 97 + .../src/commands/index.ts | 16 + .../client-backup-gateway/src/endpoints.ts | 134 + clients/client-backup-gateway/src/index.ts | 5 + .../client-backup-gateway/src/models/index.ts | 1 + .../src/models/models_0.ts | 968 +++ .../src/pagination/Interfaces.ts | 8 + .../src/pagination/ListGatewaysPaginator.ts | 59 + .../pagination/ListHypervisorsPaginator.ts | 59 + .../ListVirtualMachinesPaginator.ts | 59 + .../src/pagination/index.ts | 4 + .../src/protocols/Aws_json1_0.ts | 2041 +++++ .../src/runtimeConfig.browser.ts | 44 + .../src/runtimeConfig.native.ts | 17 + .../src/runtimeConfig.shared.ts | 17 + .../src/runtimeConfig.ts | 53 + .../client-backup-gateway/tsconfig.es.json | 10 + clients/client-backup-gateway/tsconfig.json | 32 + .../client-backup-gateway/tsconfig.types.json | 10 + clients/client-compute-optimizer/README.md | 17 +- .../src/ComputeOptimizer.ts | 199 +- .../src/ComputeOptimizerClient.ts | 24 + .../DeleteRecommendationPreferencesCommand.ts | 106 + ...DescribeRecommendationExportJobsCommand.ts | 1 - ...tAutoScalingGroupRecommendationsCommand.ts | 2 - .../ExportEBSVolumeRecommendationsCommand.ts | 5 +- ...ExportEC2InstanceRecommendationsCommand.ts | 5 +- ...ortLambdaFunctionRecommendationsCommand.ts | 5 +- ...tAutoScalingGroupRecommendationsCommand.ts | 1 - .../GetEBSVolumeRecommendationsCommand.ts | 1 - .../GetEC2InstanceRecommendationsCommand.ts | 1 - ...C2RecommendationProjectedMetricsCommand.ts | 1 - ...fectiveRecommendationPreferencesCommand.ts | 112 + .../commands/GetEnrollmentStatusCommand.ts | 1 - ...nrollmentStatusesForOrganizationCommand.ts | 1 - ...GetLambdaFunctionRecommendationsCommand.ts | 1 - .../GetRecommendationPreferencesCommand.ts | 107 + .../GetRecommendationSummariesCommand.ts | 2 - .../PutRecommendationPreferencesCommand.ts | 104 + .../commands/UpdateEnrollmentStatusCommand.ts | 3 - .../src/commands/index.ts | 4 + .../src/models/models_0.ts | 1060 ++- .../src/protocols/Aws_json1_0.ts | 881 ++- .../client-dataexchange/src/DataExchange.ts | 34 + .../src/DataExchangeClient.ts | 3 + .../src/commands/SendApiAssetCommand.ts | 95 + 
.../client-dataexchange/src/commands/index.ts | 1 + .../src/models/models_0.ts | 296 +- .../src/protocols/Aws_restJson1.ts | 228 +- clients/client-ec2/src/EC2.ts | 225 + clients/client-ec2/src/EC2Client.ts | 27 + ...pcEndpointConnectionNotificationCommand.ts | 6 +- .../DescribeHostReservationsCommand.ts | 3 +- .../DescribeSnapshotTierStatusCommand.ts | 98 + .../DescribeSpotPriceHistoryCommand.ts | 3 +- .../DescribeStaleSecurityGroupsCommand.ts | 3 +- .../ListSnapshotsInRecycleBinCommand.ts | 98 + .../src/commands/ModifyHostsCommand.ts | 3 +- .../src/commands/ModifyIdFormatCommand.ts | 2 +- .../commands/ModifyIdentityIdFormatCommand.ts | 2 +- .../commands/ModifyImageAttributeCommand.ts | 2 +- .../src/commands/ModifySnapshotTierCommand.ts | 99 + .../commands/ModifySubnetAttributeCommand.ts | 30 + .../RestoreSnapshotFromRecycleBinCommand.ts | 101 + .../commands/RestoreSnapshotTierCommand.ts | 100 + clients/client-ec2/src/commands/index.ts | 5 + clients/client-ec2/src/models/models_0.ts | 37 + clients/client-ec2/src/models/models_1.ts | 68 +- clients/client-ec2/src/models/models_2.ts | 105 +- clients/client-ec2/src/models/models_3.ts | 415 +- clients/client-ec2/src/models/models_4.ts | 562 +- clients/client-ec2/src/models/models_5.ts | 511 +- .../DescribeSnapshotTierStatusPaginator.ts | 59 + .../ListSnapshotsInRecycleBinPaginator.ts | 59 + clients/client-ec2/src/pagination/index.ts | 2 + clients/client-ec2/src/protocols/Aws_ec2.ts | 799 +- clients/client-ecr/src/ECR.ts | 225 + clients/client-ecr/src/ECRClient.ts | 36 + ...tRepositoryScanningConfigurationCommand.ts | 110 + .../CreatePullThroughCacheRuleCommand.ts | 99 + .../DeletePullThroughCacheRuleCommand.ts | 98 + .../DescribePullThroughCacheRulesCommand.ts | 100 + ...GetRegistryScanningConfigurationCommand.ts | 103 + ...PutRegistryScanningConfigurationCommand.ts | 103 + clients/client-ecr/src/commands/index.ts | 6 + clients/client-ecr/src/models/models_0.ts | 1304 +++- .../DescribePullThroughCacheRulesPaginator.ts | 59 + clients/client-ecr/src/pagination/index.ts | 1 + .../client-ecr/src/protocols/Aws_json1_1.ts | 1474 +++- clients/client-evidently/.gitignore | 9 + clients/client-evidently/LICENSE | 201 + clients/client-evidently/README.md | 211 + clients/client-evidently/jest.config.js | 4 + clients/client-evidently/package.json | 94 + clients/client-evidently/src/Evidently.ts | 1213 +++ .../client-evidently/src/EvidentlyClient.ts | 362 + .../commands/BatchEvaluateFeatureCommand.ts | 109 + .../src/commands/CreateExperimentCommand.ts | 102 + .../src/commands/CreateFeatureCommand.ts | 99 + .../src/commands/CreateLaunchCommand.ts | 102 + .../src/commands/CreateProjectCommand.ts | 97 + .../src/commands/DeleteExperimentCommand.ts | 96 + .../src/commands/DeleteFeatureCommand.ts | 95 + .../src/commands/DeleteLaunchCommand.ts | 96 + .../src/commands/DeleteProjectCommand.ts | 96 + .../src/commands/EvaluateFeatureCommand.ts | 109 + .../src/commands/GetExperimentCommand.ts | 96 + .../commands/GetExperimentResultsCommand.ts | 95 + .../src/commands/GetFeatureCommand.ts | 96 + .../src/commands/GetLaunchCommand.ts | 96 + .../src/commands/GetProjectCommand.ts | 96 + .../src/commands/ListExperimentsCommand.ts | 95 + .../src/commands/ListFeaturesCommand.ts | 95 + .../src/commands/ListLaunchesCommand.ts | 95 + .../src/commands/ListProjectsCommand.ts | 96 + .../commands/ListTagsForResourceCommand.ts | 95 + .../src/commands/PutProjectEventsCommand.ts | 96 + .../src/commands/StartExperimentCommand.ts | 96 + .../src/commands/StartLaunchCommand.ts | 96 + 
.../src/commands/StopExperimentCommand.ts | 96 + .../src/commands/StopLaunchCommand.ts | 99 + .../src/commands/TagResourceCommand.ts | 107 + .../src/commands/UntagResourceCommand.ts | 95 + .../src/commands/UpdateExperimentCommand.ts | 97 + .../src/commands/UpdateFeatureCommand.ts | 97 + .../src/commands/UpdateLaunchCommand.ts | 97 + .../src/commands/UpdateProjectCommand.ts | 100 + .../UpdateProjectDataDeliveryCommand.ts | 102 + .../client-evidently/src/commands/index.ts | 32 + clients/client-evidently/src/endpoints.ts | 207 + clients/client-evidently/src/index.ts | 5 + clients/client-evidently/src/models/index.ts | 1 + .../client-evidently/src/models/models_0.ts | 3548 +++++++++ .../src/pagination/Interfaces.ts | 8 + .../pagination/ListExperimentsPaginator.ts | 59 + .../src/pagination/ListFeaturesPaginator.ts | 59 + .../src/pagination/ListLaunchesPaginator.ts | 59 + .../src/pagination/ListProjectsPaginator.ts | 59 + .../client-evidently/src/pagination/index.ts | 5 + .../src/protocols/Aws_restJson1.ts | 5344 +++++++++++++ .../src/runtimeConfig.browser.ts | 44 + .../src/runtimeConfig.native.ts | 17 + .../src/runtimeConfig.shared.ts | 17 + clients/client-evidently/src/runtimeConfig.ts | 53 + clients/client-evidently/tsconfig.es.json | 10 + clients/client-evidently/tsconfig.json | 32 + clients/client-evidently/tsconfig.types.json | 10 + clients/client-fsx/src/FSx.ts | 852 +- clients/client-fsx/src/FSxClient.ts | 48 + .../src/commands/CopyBackupCommand.ts | 24 +- .../src/commands/CreateBackupCommand.ts | 52 +- .../CreateDataRepositoryAssociationCommand.ts | 113 + .../CreateDataRepositoryTaskCommand.ts | 11 +- .../src/commands/CreateFileSystemCommand.ts | 52 +- .../CreateFileSystemFromBackupCommand.ts | 28 +- .../src/commands/CreateSnapshotCommand.ts | 122 + .../src/commands/CreateVolumeCommand.ts | 3 +- .../src/commands/DeleteBackupCommand.ts | 7 +- .../DeleteDataRepositoryAssociationCommand.ts | 109 + .../src/commands/DeleteFileSystemCommand.ts | 29 +- .../src/commands/DeleteSnapshotCommand.ts | 99 + .../src/commands/DeleteVolumeCommand.ts | 6 +- .../src/commands/DescribeBackupsCommand.ts | 35 +- ...scribeDataRepositoryAssociationsCommand.ts | 124 + .../commands/DescribeFileSystemsCommand.ts | 18 +- .../src/commands/DescribeSnapshotsCommand.ts | 121 + .../src/commands/DescribeVolumesCommand.ts | 3 +- .../ReleaseFileSystemNfsV3LocksCommand.ts | 101 + .../RestoreVolumeFromSnapshotCommand.ts | 99 + .../UpdateDataRepositoryAssociationCommand.ts | 105 + .../src/commands/UpdateFileSystemCommand.ts | 117 +- .../src/commands/UpdateSnapshotCommand.ts | 95 + .../src/commands/UpdateVolumeCommand.ts | 2 +- clients/client-fsx/src/commands/index.ts | 10 + clients/client-fsx/src/models/models_0.ts | 4177 +++++++--- ...ribeDataRepositoryAssociationsPaginator.ts | 59 + .../pagination/DescribeSnapshotsPaginator.ts | 59 + clients/client-fsx/src/pagination/index.ts | 2 + .../client-fsx/src/protocols/Aws_json1_1.ts | 2585 +++++- .../src/commands/GetDatabaseCommand.ts | 3 +- clients/client-glue/src/models/models_0.ts | 137 +- clients/client-glue/src/models/models_1.ts | 118 +- .../client-glue/src/protocols/Aws_json1_1.ts | 136 +- clients/client-inspector2/.gitignore | 9 + clients/client-inspector2/LICENSE | 201 + clients/client-inspector2/README.md | 204 + clients/client-inspector2/jest.config.js | 4 + clients/client-inspector2/package.json | 96 + clients/client-inspector2/src/Inspector2.ts | 1048 +++ .../client-inspector2/src/Inspector2Client.ts | 382 + .../src/commands/AssociateMemberCommand.ts | 95 + 
.../commands/BatchGetAccountStatusCommand.ts | 95 + .../commands/BatchGetFreeTrialInfoCommand.ts | 95 + .../commands/CancelFindingsReportCommand.ts | 95 + .../src/commands/CreateFilterCommand.ts | 95 + .../commands/CreateFindingsReportCommand.ts | 95 + .../src/commands/DeleteFilterCommand.ts | 95 + ...escribeOrganizationConfigurationCommand.ts | 106 + .../src/commands/DisableCommand.ts | 96 + .../DisableDelegatedAdminAccountCommand.ts | 100 + .../src/commands/DisassociateMemberCommand.ts | 95 + .../src/commands/EnableCommand.ts | 88 + .../EnableDelegatedAdminAccountCommand.ts | 100 + .../GetDelegatedAdminAccountCommand.ts | 96 + .../GetFindingsReportStatusCommand.ts | 95 + .../src/commands/GetMemberCommand.ts | 95 + .../commands/ListAccountPermissionsCommand.ts | 95 + .../src/commands/ListCoverageCommand.ts | 95 + .../commands/ListCoverageStatisticsCommand.ts | 95 + .../ListDelegatedAdminAccountsCommand.ts | 99 + .../src/commands/ListFiltersCommand.ts | 95 + .../ListFindingAggregationsCommand.ts | 95 + .../src/commands/ListFindingsCommand.ts | 95 + .../src/commands/ListMembersCommand.ts | 96 + .../commands/ListTagsForResourceCommand.ts | 95 + .../src/commands/ListUsageTotalsCommand.ts | 95 + .../src/commands/TagResourceCommand.ts | 95 + .../src/commands/UntagResourceCommand.ts | 95 + .../src/commands/UpdateFilterCommand.ts | 95 + .../UpdateOrganizationConfigurationCommand.ts | 103 + .../client-inspector2/src/commands/index.ts | 30 + clients/client-inspector2/src/endpoints.ts | 134 + clients/client-inspector2/src/index.ts | 5 + clients/client-inspector2/src/models/index.ts | 1 + .../client-inspector2/src/models/models_0.ts | 4850 ++++++++++++ .../src/pagination/Interfaces.ts | 8 + .../ListAccountPermissionsPaginator.ts | 59 + .../src/pagination/ListCoveragePaginator.ts | 59 + .../ListCoverageStatisticsPaginator.ts | 58 + .../ListDelegatedAdminAccountsPaginator.ts | 59 + .../src/pagination/ListFiltersPaginator.ts | 55 + .../ListFindingAggregationsPaginator.ts | 59 + .../src/pagination/ListFindingsPaginator.ts | 59 + .../src/pagination/ListMembersPaginator.ts | 55 + .../pagination/ListUsageTotalsPaginator.ts | 59 + .../client-inspector2/src/pagination/index.ts | 10 + .../src/protocols/Aws_restJson1.ts | 5747 ++++++++++++++ .../src/runtimeConfig.browser.ts | 44 + .../src/runtimeConfig.native.ts | 17 + .../src/runtimeConfig.shared.ts | 17 + .../client-inspector2/src/runtimeConfig.ts | 53 + clients/client-inspector2/tsconfig.es.json | 10 + clients/client-inspector2/tsconfig.json | 32 + clients/client-inspector2/tsconfig.types.json | 10 + .../ListThingRegistrationTasksCommand.ts | 3 +- .../src/commands/ListThingsCommand.ts | 3 +- clients/client-iot/src/models/models_1.ts | 119 +- clients/client-iot/src/models/models_2.ts | 83 +- .../client-iot/src/protocols/Aws_restJson1.ts | 7 + .../client-iotsitewise/src/models/models_0.ts | 76 +- .../src/protocols/Aws_restJson1.ts | 27 + clients/client-iottwinmaker/.gitignore | 9 + clients/client-iottwinmaker/LICENSE | 201 + clients/client-iottwinmaker/README.md | 212 + clients/client-iottwinmaker/jest.config.js | 4 + clients/client-iottwinmaker/package.json | 94 + .../client-iottwinmaker/src/IoTTwinMaker.ts | 902 +++ .../src/IoTTwinMakerClient.ts | 351 + .../commands/BatchPutPropertyValuesCommand.ts | 95 + .../commands/CreateComponentTypeCommand.ts | 100 + .../src/commands/CreateEntityCommand.ts | 95 + .../src/commands/CreateSceneCommand.ts | 95 + .../src/commands/CreateWorkspaceCommand.ts | 95 + .../commands/DeleteComponentTypeCommand.ts | 95 + 
.../src/commands/DeleteEntityCommand.ts | 95 + .../src/commands/DeleteSceneCommand.ts | 95 + .../src/commands/DeleteWorkspaceCommand.ts | 95 + .../src/commands/GetComponentTypeCommand.ts | 95 + .../src/commands/GetEntityCommand.ts | 95 + .../src/commands/GetPropertyValueCommand.ts | 96 + .../GetPropertyValueHistoryCommand.ts | 97 + .../src/commands/GetSceneCommand.ts | 95 + .../src/commands/GetWorkspaceCommand.ts | 95 + .../src/commands/ListComponentTypesCommand.ts | 95 + .../src/commands/ListEntitiesCommand.ts | 95 + .../src/commands/ListScenesCommand.ts | 95 + .../commands/ListTagsForResourceCommand.ts | 95 + .../src/commands/ListWorkspacesCommand.ts | 95 + .../src/commands/TagResourceCommand.ts | 95 + .../src/commands/UntagResourceCommand.ts | 95 + .../commands/UpdateComponentTypeCommand.ts | 95 + .../src/commands/UpdateEntityCommand.ts | 95 + .../src/commands/UpdateSceneCommand.ts | 95 + .../src/commands/UpdateWorkspaceCommand.ts | 95 + .../client-iottwinmaker/src/commands/index.ts | 26 + clients/client-iottwinmaker/src/endpoints.ts | 134 + clients/client-iottwinmaker/src/index.ts | 5 + .../client-iottwinmaker/src/models/index.ts | 1 + .../src/models/models_0.ts | 2856 +++++++ .../GetPropertyValueHistoryPaginator.ts | 59 + .../src/pagination/Interfaces.ts | 8 + .../pagination/ListComponentTypesPaginator.ts | 59 + .../src/pagination/ListEntitiesPaginator.ts | 59 + .../src/pagination/ListScenesPaginator.ts | 55 + .../src/pagination/ListWorkspacesPaginator.ts | 59 + .../src/pagination/index.ts | 6 + .../src/protocols/Aws_restJson1.ts | 5066 ++++++++++++ .../src/runtimeConfig.browser.ts | 44 + .../src/runtimeConfig.native.ts | 17 + .../src/runtimeConfig.shared.ts | 17 + .../client-iottwinmaker/src/runtimeConfig.ts | 53 + clients/client-iottwinmaker/tsconfig.es.json | 10 + clients/client-iottwinmaker/tsconfig.json | 32 + .../client-iottwinmaker/tsconfig.types.json | 10 + clients/client-kafka/src/Kafka.ts | 111 + clients/client-kafka/src/KafkaClient.ts | 9 + .../src/commands/CreateClusterV2Command.ts | 95 + .../src/commands/DescribeClusterV2Command.ts | 95 + .../src/commands/ListClustersV2Command.ts | 95 + clients/client-kafka/src/commands/index.ts | 3 + clients/client-kafka/src/models/models_0.ts | 633 +- .../src/pagination/ListClustersV2Paginator.ts | 59 + clients/client-kafka/src/pagination/index.ts | 1 + .../src/protocols/Aws_restJson1.ts | 724 +- clients/client-kinesis/README.md | 4 +- clients/client-kinesis/src/Kinesis.ts | 338 +- clients/client-kinesis/src/KinesisClient.ts | 11 +- .../src/commands/AddTagsToStreamCommand.ts | 6 +- .../src/commands/CreateStreamCommand.ts | 38 +- .../DecreaseStreamRetentionPeriodCommand.ts | 10 +- .../src/commands/DeleteStreamCommand.ts | 9 +- .../src/commands/DescribeStreamCommand.ts | 6 +- .../commands/DescribeStreamSummaryCommand.ts | 10 +- .../src/commands/GetRecordsCommand.ts | 48 +- .../src/commands/GetShardIteratorCommand.ts | 14 +- .../IncreaseStreamRetentionPeriodCommand.ts | 6 +- .../src/commands/ListShardsCommand.ts | 6 +- .../src/commands/ListStreamsCommand.ts | 2 +- .../src/commands/MergeShardsCommand.ts | 10 +- .../src/commands/PutRecordCommand.ts | 16 +- .../src/commands/PutRecordsCommand.ts | 14 +- .../src/commands/SplitShardCommand.ts | 36 +- .../commands/StartStreamEncryptionCommand.ts | 20 +- .../commands/StopStreamEncryptionCommand.ts | 17 +- .../src/commands/SubscribeToShardCommand.ts | 5 +- .../src/commands/UpdateShardCountCommand.ts | 21 +- .../src/commands/UpdateStreamModeCommand.ts | 98 + 
clients/client-kinesis/src/commands/index.ts | 1 + clients/client-kinesis/src/models/models_0.ts | 508 +- .../src/protocols/Aws_json1_1.ts | 173 + clients/client-lakeformation/README.md | 4 +- .../client-lakeformation/src/LakeFormation.ts | 742 +- .../src/LakeFormationClient.ts | 86 +- .../commands/AddLFTagsToResourceCommand.ts | 12 +- .../commands/BatchGrantPermissionsCommand.ts | 10 +- .../commands/BatchRevokePermissionsCommand.ts | 10 +- .../src/commands/CancelTransactionCommand.ts | 95 + .../src/commands/CommitTransactionCommand.ts | 95 + .../commands/CreateDataCellsFilterCommand.ts | 95 + .../src/commands/CreateLFTagCommand.ts | 12 +- .../commands/DeleteDataCellsFilterCommand.ts | 95 + .../src/commands/DeleteLFTagCommand.ts | 12 +- .../commands/DeleteObjectsOnCancelCommand.ts | 102 + .../src/commands/DeregisterResourceCommand.ts | 10 +- .../src/commands/DescribeResourceCommand.ts | 12 +- .../commands/DescribeTransactionCommand.ts | 95 + .../src/commands/ExtendTransactionCommand.ts | 97 + .../commands/GetDataLakeSettingsCommand.ts | 10 +- .../GetEffectivePermissionsForPathCommand.ts | 10 +- .../src/commands/GetLFTagCommand.ts | 11 +- .../src/commands/GetQueryStateCommand.ts | 95 + .../src/commands/GetQueryStatisticsCommand.ts | 95 + .../src/commands/GetResourceLFTagsCommand.ts | 12 +- .../src/commands/GetTableObjectsCommand.ts | 95 + .../src/commands/GetWorkUnitResultsCommand.ts | 95 + .../src/commands/GetWorkUnitsCommand.ts | 95 + .../src/commands/GrantPermissionsCommand.ts | 10 +- .../commands/ListDataCellsFilterCommand.ts | 95 + .../src/commands/ListLFTagsCommand.ts | 12 +- .../src/commands/ListPermissionsCommand.ts | 10 +- .../src/commands/ListResourcesCommand.ts | 10 +- .../ListTableStorageOptimizersCommand.ts | 98 + .../src/commands/ListTransactionsCommand.ts | 96 + .../commands/PutDataLakeSettingsCommand.ts | 10 +- .../src/commands/RegisterResourceCommand.ts | 14 +- .../RemoveLFTagsFromResourceCommand.ts | 12 +- .../src/commands/RevokePermissionsCommand.ts | 10 +- .../SearchDatabasesByLFTagsCommand.ts | 10 +- .../commands/SearchTablesByLFTagsCommand.ts | 12 +- .../src/commands/StartQueryPlanningCommand.ts | 97 + .../src/commands/StartTransactionCommand.ts | 95 + .../src/commands/UpdateLFTagCommand.ts | 12 +- .../src/commands/UpdateResourceCommand.ts | 12 +- .../src/commands/UpdateTableObjectsCommand.ts | 95 + .../UpdateTableStorageOptimizerCommand.ts | 100 + .../src/commands/index.ts | 19 + .../src/models/models_0.ts | 2234 +++++- .../pagination/GetTableObjectsPaginator.ts | 59 + .../src/pagination/GetWorkUnitsPaginator.ts | 59 + .../ListDataCellsFilterPaginator.ts | 59 + .../src/pagination/ListLFTagsPaginator.ts | 55 + .../ListTableStorageOptimizersPaginator.ts | 59 + .../pagination/ListTransactionsPaginator.ts | 59 + .../SearchDatabasesByLFTagsPaginator.ts | 59 + .../SearchTablesByLFTagsPaginator.ts | 59 + .../src/pagination/index.ts | 8 + .../src/protocols/Aws_json1_1.ts | 3906 --------- .../src/protocols/Aws_restJson1.ts | 6580 ++++++++++++++++ .../client-outposts/src/models/models_0.ts | 19 + .../src/protocols/Aws_restJson1.ts | 3 + clients/client-rbin/.gitignore | 9 + clients/client-rbin/LICENSE | 201 + clients/client-rbin/README.md | 216 + clients/client-rbin/jest.config.js | 4 + clients/client-rbin/package.json | 94 + clients/client-rbin/src/Rbin.ts | 260 + clients/client-rbin/src/RbinClient.ts | 286 + .../src/commands/CreateRuleCommand.ts | 96 + .../src/commands/DeleteRuleCommand.ts | 96 + .../src/commands/GetRuleCommand.ts | 91 + .../src/commands/ListRulesCommand.ts | 
95 + .../commands/ListTagsForResourceCommand.ts | 95 + .../src/commands/TagResourceCommand.ts | 95 + .../src/commands/UntagResourceCommand.ts | 95 + .../src/commands/UpdateRuleCommand.ts | 96 + clients/client-rbin/src/commands/index.ts | 8 + clients/client-rbin/src/endpoints.ts | 134 + clients/client-rbin/src/index.ts | 5 + clients/client-rbin/src/models/index.ts | 1 + clients/client-rbin/src/models/models_0.ts | 619 ++ .../client-rbin/src/pagination/Interfaces.ts | 8 + .../src/pagination/ListRulesPaginator.ts | 55 + clients/client-rbin/src/pagination/index.ts | 2 + .../src/protocols/Aws_restJson1.ts | 1171 +++ .../client-rbin/src/runtimeConfig.browser.ts | 44 + .../client-rbin/src/runtimeConfig.native.ts | 17 + .../client-rbin/src/runtimeConfig.shared.ts | 17 + clients/client-rbin/src/runtimeConfig.ts | 53 + clients/client-rbin/tsconfig.es.json | 10 + clients/client-rbin/tsconfig.json | 32 + clients/client-rbin/tsconfig.types.json | 10 + .../client-redshift-data/src/RedshiftData.ts | 60 +- .../commands/BatchExecuteStatementCommand.ts | 10 +- .../src/commands/DescribeTableCommand.ts | 10 +- .../src/commands/ExecuteStatementCommand.ts | 10 +- .../src/commands/ListDatabasesCommand.ts | 10 +- .../src/commands/ListSchemasCommand.ts | 10 +- .../src/commands/ListTablesCommand.ts | 10 +- .../src/models/models_0.ts | 63 +- .../src/protocols/Aws_json1_1.ts | 65 + clients/client-rum/.gitignore | 9 + clients/client-rum/LICENSE | 201 + clients/client-rum/README.md | 211 + clients/client-rum/jest.config.js | 4 + clients/client-rum/package.json | 94 + clients/client-rum/src/RUM.ts | 408 + clients/client-rum/src/RUMClient.ts | 287 + .../src/commands/CreateAppMonitorCommand.ts | 103 + .../src/commands/DeleteAppMonitorCommand.ts | 95 + .../src/commands/GetAppMonitorCommand.ts | 95 + .../src/commands/GetAppMonitorDataCommand.ts | 96 + .../src/commands/ListAppMonitorsCommand.ts | 95 + .../commands/ListTagsForResourceCommand.ts | 95 + .../src/commands/PutRumEventsCommand.ts | 98 + .../src/commands/TagResourceCommand.ts | 108 + .../src/commands/UntagResourceCommand.ts | 95 + .../src/commands/UpdateAppMonitorCommand.ts | 105 + clients/client-rum/src/commands/index.ts | 10 + clients/client-rum/src/endpoints.ts | 134 + clients/client-rum/src/index.ts | 5 + clients/client-rum/src/models/index.ts | 1 + clients/client-rum/src/models/models_0.ts | 1016 +++ .../pagination/GetAppMonitorDataPaginator.ts | 59 + .../client-rum/src/pagination/Interfaces.ts | 8 + .../pagination/ListAppMonitorsPaginator.ts | 59 + clients/client-rum/src/pagination/index.ts | 3 + .../client-rum/src/protocols/Aws_restJson1.ts | 1728 ++++ .../client-rum/src/runtimeConfig.browser.ts | 44 + .../client-rum/src/runtimeConfig.native.ts | 17 + .../client-rum/src/runtimeConfig.shared.ts | 17 + clients/client-rum/src/runtimeConfig.ts | 53 + clients/client-rum/tsconfig.es.json | 10 + clients/client-rum/tsconfig.json | 32 + clients/client-rum/tsconfig.types.json | 10 + clients/client-s3/src/S3.ts | 147 +- .../src/commands/CopyObjectCommand.ts | 12 +- .../src/commands/CreateBucketCommand.ts | 50 +- ...tIntelligentTieringConfigurationCommand.ts | 4 +- .../src/commands/GetBucketAclCommand.ts | 7 + ...tIntelligentTieringConfigurationCommand.ts | 4 +- .../GetBucketOwnershipControlsCommand.ts | 6 +- .../src/commands/GetObjectAclCommand.ts | 8 +- ...IntelligentTieringConfigurationsCommand.ts | 4 +- .../src/commands/PutBucketAclCommand.ts | 8 +- ...tIntelligentTieringConfigurationCommand.ts | 4 +- .../src/commands/PutBucketLoggingCommand.ts | 11 +- 
.../PutBucketOwnershipControlsCommand.ts | 4 +- .../src/commands/PutObjectAclCommand.ts | 9 +- .../src/commands/PutObjectCommand.ts | 16 +- clients/client-s3/src/models/models_0.ts | 101 +- clients/client-s3/src/models/models_1.ts | 20 +- .../client-s3/src/protocols/Aws_restXml.ts | 36 +- clients/client-snowball/README.md | 13 +- clients/client-snowball/src/Snowball.ts | 63 +- clients/client-snowball/src/SnowballClient.ts | 13 +- .../src/commands/CreateJobCommand.ts | 19 +- .../commands/CreateLongTermPricingCommand.ts | 3 +- .../CreateReturnShippingLabelCommand.ts | 2 +- .../DescribeReturnShippingLabelCommand.ts | 2 +- .../src/commands/GetJobManifestCommand.ts | 2 +- .../src/commands/GetJobUnlockCodeCommand.ts | 8 +- .../src/commands/GetSnowballUsageCommand.ts | 4 +- .../src/commands/GetSoftwareUpdatesCommand.ts | 2 +- .../commands/ListCompatibleImagesCommand.ts | 8 +- .../client-snowball/src/models/models_0.ts | 346 +- .../src/protocols/Aws_json1_1.ts | 30 + clients/client-ssm/README.md | 18 +- clients/client-ssm/src/SSM.ts | 168 +- clients/client-ssm/src/SSMClient.ts | 18 +- .../src/commands/AddTagsToResourceCommand.ts | 10 +- .../src/commands/CreateActivationCommand.ts | 12 +- .../commands/CreateAssociationBatchCommand.ts | 12 +- .../src/commands/CreateAssociationCommand.ts | 20 +- .../src/commands/CreateDocumentCommand.ts | 2 +- .../src/commands/DeleteActivationCommand.ts | 5 +- .../src/commands/DeleteAssociationCommand.ts | 8 +- .../src/commands/DeleteDocumentCommand.ts | 4 +- .../commands/DeleteResourceDataSyncCommand.ts | 4 +- .../DeregisterManagedInstanceCommand.ts | 2 +- .../commands/DescribeActivationsCommand.ts | 2 +- .../commands/DescribeAssociationCommand.ts | 2 +- ...ibeEffectiveInstanceAssociationsCommand.ts | 2 +- ...scribeInstanceAssociationsStatusCommand.ts | 2 +- .../DescribeInstanceInformationCommand.ts | 13 +- .../DescribeInstancePatchStatesCommand.ts | 2 +- ...InstancePatchStatesForPatchGroupCommand.ts | 2 +- .../DescribeInstancePatchesCommand.ts | 4 +- ...cribeMaintenanceWindowsForTargetCommand.ts | 2 +- .../commands/GetCommandInvocationCommand.ts | 4 +- .../commands/GetConnectionStatusCommand.ts | 2 +- ...ployablePatchSnapshotForInstanceCommand.ts | 4 +- .../src/commands/GetInventoryCommand.ts | 2 +- .../src/commands/ListAssociationsCommand.ts | 2 +- .../commands/ListCommandInvocationsCommand.ts | 8 +- .../src/commands/PutComplianceItemsCommand.ts | 2 +- .../src/commands/PutInventoryCommand.ts | 2 +- .../src/commands/ResumeSessionCommand.ts | 2 +- .../src/commands/SendCommandCommand.ts | 2 +- .../src/commands/StartSessionCommand.ts | 2 +- .../src/commands/TerminateSessionCommand.ts | 2 +- .../UpdateAssociationStatusCommand.ts | 2 +- .../commands/UpdateDocumentMetadataCommand.ts | 3 +- .../UpdateManagedInstanceRoleCommand.ts | 4 +- clients/client-ssm/src/models/models_0.ts | 390 +- clients/client-ssm/src/models/models_1.ts | 385 +- clients/client-ssm/src/models/models_2.ts | 37 +- .../client-ssm/src/protocols/Aws_json1_1.ts | 4 +- .../src/models/models_0.ts | 25 +- .../src/protocols/Aws_json1_1.ts | 3 + clients/client-wellarchitected/README.md | 12 +- .../src/WellArchitected.ts | 369 +- .../src/WellArchitectedClient.ts | 36 +- .../src/commands/AssociateLensesCommand.ts | 10 + .../src/commands/CreateLensShareCommand.ts | 108 + .../src/commands/CreateLensVersionCommand.ts | 99 + .../src/commands/CreateWorkloadCommand.ts | 6 +- .../commands/CreateWorkloadShareCommand.ts | 6 +- .../src/commands/DeleteLensCommand.ts | 109 + .../src/commands/DeleteLensShareCommand.ts | 108 
+ .../src/commands/DisassociateLensesCommand.ts | 3 +- .../src/commands/ExportLensCommand.ts | 109 + .../src/commands/GetLensCommand.ts | 95 + .../src/commands/ImportLensCommand.ts | 113 + .../src/commands/ListLensSharesCommand.ts | 95 + .../src/commands/index.ts | 8 + .../src/models/models_0.ts | 941 ++- .../src/pagination/ListLensSharesPaginator.ts | 59 + .../src/pagination/index.ts | 1 + .../src/protocols/Aws_restJson1.ts | 1275 ++- clients/client-workspaces-web/.gitignore | 9 + clients/client-workspaces-web/LICENSE | 201 + clients/client-workspaces-web/README.md | 208 + clients/client-workspaces-web/jest.config.js | 4 + clients/client-workspaces-web/package.json | 96 + .../src/WorkSpacesWeb.ts | 1612 ++++ .../src/WorkSpacesWebClient.ts | 458 ++ .../AssociateBrowserSettingsCommand.ts | 95 + .../AssociateNetworkSettingsCommand.ts | 95 + .../commands/AssociateTrustStoreCommand.ts | 95 + .../commands/AssociateUserSettingsCommand.ts | 95 + .../commands/CreateBrowserSettingsCommand.ts | 97 + .../commands/CreateIdentityProviderCommand.ts | 95 + .../commands/CreateNetworkSettingsCommand.ts | 97 + .../src/commands/CreatePortalCommand.ts | 95 + .../src/commands/CreateTrustStoreCommand.ts | 99 + .../src/commands/CreateUserSettingsCommand.ts | 97 + .../commands/DeleteBrowserSettingsCommand.ts | 95 + .../commands/DeleteIdentityProviderCommand.ts | 95 + .../commands/DeleteNetworkSettingsCommand.ts | 95 + .../src/commands/DeletePortalCommand.ts | 95 + .../src/commands/DeleteTrustStoreCommand.ts | 95 + .../src/commands/DeleteUserSettingsCommand.ts | 95 + .../DisassociateBrowserSettingsCommand.ts | 100 + .../DisassociateNetworkSettingsCommand.ts | 100 + .../commands/DisassociateTrustStoreCommand.ts | 95 + .../DisassociateUserSettingsCommand.ts | 95 + .../src/commands/GetBrowserSettingsCommand.ts | 95 + .../commands/GetIdentityProviderCommand.ts | 95 + .../src/commands/GetNetworkSettingsCommand.ts | 95 + .../src/commands/GetPortalCommand.ts | 95 + ...GetPortalServiceProviderMetadataCommand.ts | 103 + .../GetTrustStoreCertificateCommand.ts | 95 + .../src/commands/GetTrustStoreCommand.ts | 95 + .../src/commands/GetUserSettingsCommand.ts | 95 + .../commands/ListBrowserSettingsCommand.ts | 95 + .../commands/ListIdentityProvidersCommand.ts | 95 + .../commands/ListNetworkSettingsCommand.ts | 95 + .../src/commands/ListPortalsCommand.ts | 95 + .../commands/ListTagsForResourceCommand.ts | 95 + .../ListTrustStoreCertificatesCommand.ts | 98 + .../src/commands/ListTrustStoresCommand.ts | 95 + .../src/commands/ListUserSettingsCommand.ts | 95 + .../src/commands/TagResourceCommand.ts | 95 + .../src/commands/UntagResourceCommand.ts | 95 + .../commands/UpdateBrowserSettingsCommand.ts | 95 + .../commands/UpdateIdentityProviderCommand.ts | 95 + .../commands/UpdateNetworkSettingsCommand.ts | 95 + .../src/commands/UpdatePortalCommand.ts | 95 + .../src/commands/UpdateTrustStoreCommand.ts | 95 + .../src/commands/UpdateUserSettingsCommand.ts | 95 + .../src/commands/index.ts | 44 + .../client-workspaces-web/src/endpoints.ts | 134 + clients/client-workspaces-web/src/index.ts | 5 + .../client-workspaces-web/src/models/index.ts | 1 + .../src/models/models_0.ts | 2954 +++++++ .../src/pagination/Interfaces.ts | 8 + .../ListBrowserSettingsPaginator.ts | 59 + .../ListIdentityProvidersPaginator.ts | 59 + .../ListNetworkSettingsPaginator.ts | 59 + .../src/pagination/ListPortalsPaginator.ts | 55 + .../ListTrustStoreCertificatesPaginator.ts | 59 + .../pagination/ListTrustStoresPaginator.ts | 59 + 
.../pagination/ListUserSettingsPaginator.ts | 59 + .../src/pagination/index.ts | 8 + .../src/protocols/Aws_restJson1.ts | 6344 +++++++++++++++ .../src/runtimeConfig.browser.ts | 44 + .../src/runtimeConfig.native.ts | 17 + .../src/runtimeConfig.shared.ts | 17 + .../src/runtimeConfig.ts | 53 + .../client-workspaces-web/tsconfig.es.json | 10 + clients/client-workspaces-web/tsconfig.json | 32 + .../client-workspaces-web/tsconfig.types.json | 10 + .../aws-models/accessanalyzer.json | 77 +- .../aws-models/backup-gateway.json | 1543 ++++ .../aws-models/compute-optimizer.json | 958 ++- .../sdk-codegen/aws-models/dataexchange.json | 439 +- codegen/sdk-codegen/aws-models/ec2.json | 825 +- codegen/sdk-codegen/aws-models/ecr.json | 1596 +++- codegen/sdk-codegen/aws-models/evidently.json | 5309 +++++++++++++ codegen/sdk-codegen/aws-models/fsx.json | 2827 ++++++- codegen/sdk-codegen/aws-models/glue.json | 159 +- .../sdk-codegen/aws-models/inspector2.json | 6947 +++++++++++++++++ codegen/sdk-codegen/aws-models/iot.json | 54 +- .../sdk-codegen/aws-models/iotsitewise.json | 55 +- .../sdk-codegen/aws-models/iottwinmaker.json | 4422 +++++++++++ codegen/sdk-codegen/aws-models/kafka.json | 666 +- codegen/sdk-codegen/aws-models/kinesis.json | 385 +- .../sdk-codegen/aws-models/lakeformation.json | 3135 +++++++- codegen/sdk-codegen/aws-models/outposts.json | 27 + codegen/sdk-codegen/aws-models/rbin.json | 1014 +++ .../sdk-codegen/aws-models/redshift-data.json | 81 +- codegen/sdk-codegen/aws-models/rum.json | 1536 ++++ codegen/sdk-codegen/aws-models/s3.json | 115 +- codegen/sdk-codegen/aws-models/snowball.json | 242 +- codegen/sdk-codegen/aws-models/ssm.json | 551 +- .../aws-models/storage-gateway.json | 73 +- .../aws-models/wellarchitected.json | 1210 ++- .../aws-models/workspaces-web.json | 4446 +++++++++++ .../aws/typescript/codegen/endpoints.json | 104 +- 693 files changed, 150746 insertions(+), 10652 deletions(-) create mode 100644 clients/client-backup-gateway/.gitignore create mode 100644 clients/client-backup-gateway/LICENSE create mode 100644 clients/client-backup-gateway/README.md create mode 100644 clients/client-backup-gateway/jest.config.js create mode 100644 clients/client-backup-gateway/package.json create mode 100644 clients/client-backup-gateway/src/BackupGateway.ts create mode 100644 clients/client-backup-gateway/src/BackupGatewayClient.ts create mode 100644 clients/client-backup-gateway/src/commands/AssociateGatewayToServerCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/CreateGatewayCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/DeleteGatewayCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/DeleteHypervisorCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/DisassociateGatewayFromServerCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/ImportHypervisorConfigurationCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/ListGatewaysCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/ListHypervisorsCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/ListTagsForResourceCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/ListVirtualMachinesCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/PutMaintenanceStartTimeCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/TagResourceCommand.ts create mode 100644 
clients/client-backup-gateway/src/commands/TestHypervisorConfigurationCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/UntagResourceCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/UpdateGatewayInformationCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/UpdateHypervisorCommand.ts create mode 100644 clients/client-backup-gateway/src/commands/index.ts create mode 100644 clients/client-backup-gateway/src/endpoints.ts create mode 100644 clients/client-backup-gateway/src/index.ts create mode 100644 clients/client-backup-gateway/src/models/index.ts create mode 100644 clients/client-backup-gateway/src/models/models_0.ts create mode 100644 clients/client-backup-gateway/src/pagination/Interfaces.ts create mode 100644 clients/client-backup-gateway/src/pagination/ListGatewaysPaginator.ts create mode 100644 clients/client-backup-gateway/src/pagination/ListHypervisorsPaginator.ts create mode 100644 clients/client-backup-gateway/src/pagination/ListVirtualMachinesPaginator.ts create mode 100644 clients/client-backup-gateway/src/pagination/index.ts create mode 100644 clients/client-backup-gateway/src/protocols/Aws_json1_0.ts create mode 100644 clients/client-backup-gateway/src/runtimeConfig.browser.ts create mode 100644 clients/client-backup-gateway/src/runtimeConfig.native.ts create mode 100644 clients/client-backup-gateway/src/runtimeConfig.shared.ts create mode 100644 clients/client-backup-gateway/src/runtimeConfig.ts create mode 100644 clients/client-backup-gateway/tsconfig.es.json create mode 100644 clients/client-backup-gateway/tsconfig.json create mode 100644 clients/client-backup-gateway/tsconfig.types.json create mode 100644 clients/client-compute-optimizer/src/commands/DeleteRecommendationPreferencesCommand.ts create mode 100644 clients/client-compute-optimizer/src/commands/GetEffectiveRecommendationPreferencesCommand.ts create mode 100644 clients/client-compute-optimizer/src/commands/GetRecommendationPreferencesCommand.ts create mode 100644 clients/client-compute-optimizer/src/commands/PutRecommendationPreferencesCommand.ts create mode 100644 clients/client-dataexchange/src/commands/SendApiAssetCommand.ts create mode 100644 clients/client-ec2/src/commands/DescribeSnapshotTierStatusCommand.ts create mode 100644 clients/client-ec2/src/commands/ListSnapshotsInRecycleBinCommand.ts create mode 100644 clients/client-ec2/src/commands/ModifySnapshotTierCommand.ts create mode 100644 clients/client-ec2/src/commands/RestoreSnapshotFromRecycleBinCommand.ts create mode 100644 clients/client-ec2/src/commands/RestoreSnapshotTierCommand.ts create mode 100644 clients/client-ec2/src/pagination/DescribeSnapshotTierStatusPaginator.ts create mode 100644 clients/client-ec2/src/pagination/ListSnapshotsInRecycleBinPaginator.ts create mode 100644 clients/client-ecr/src/commands/BatchGetRepositoryScanningConfigurationCommand.ts create mode 100644 clients/client-ecr/src/commands/CreatePullThroughCacheRuleCommand.ts create mode 100644 clients/client-ecr/src/commands/DeletePullThroughCacheRuleCommand.ts create mode 100644 clients/client-ecr/src/commands/DescribePullThroughCacheRulesCommand.ts create mode 100644 clients/client-ecr/src/commands/GetRegistryScanningConfigurationCommand.ts create mode 100644 clients/client-ecr/src/commands/PutRegistryScanningConfigurationCommand.ts create mode 100644 clients/client-ecr/src/pagination/DescribePullThroughCacheRulesPaginator.ts create mode 100644 clients/client-evidently/.gitignore create mode 100644 
clients/client-evidently/LICENSE create mode 100644 clients/client-evidently/README.md create mode 100644 clients/client-evidently/jest.config.js create mode 100644 clients/client-evidently/package.json create mode 100644 clients/client-evidently/src/Evidently.ts create mode 100644 clients/client-evidently/src/EvidentlyClient.ts create mode 100644 clients/client-evidently/src/commands/BatchEvaluateFeatureCommand.ts create mode 100644 clients/client-evidently/src/commands/CreateExperimentCommand.ts create mode 100644 clients/client-evidently/src/commands/CreateFeatureCommand.ts create mode 100644 clients/client-evidently/src/commands/CreateLaunchCommand.ts create mode 100644 clients/client-evidently/src/commands/CreateProjectCommand.ts create mode 100644 clients/client-evidently/src/commands/DeleteExperimentCommand.ts create mode 100644 clients/client-evidently/src/commands/DeleteFeatureCommand.ts create mode 100644 clients/client-evidently/src/commands/DeleteLaunchCommand.ts create mode 100644 clients/client-evidently/src/commands/DeleteProjectCommand.ts create mode 100644 clients/client-evidently/src/commands/EvaluateFeatureCommand.ts create mode 100644 clients/client-evidently/src/commands/GetExperimentCommand.ts create mode 100644 clients/client-evidently/src/commands/GetExperimentResultsCommand.ts create mode 100644 clients/client-evidently/src/commands/GetFeatureCommand.ts create mode 100644 clients/client-evidently/src/commands/GetLaunchCommand.ts create mode 100644 clients/client-evidently/src/commands/GetProjectCommand.ts create mode 100644 clients/client-evidently/src/commands/ListExperimentsCommand.ts create mode 100644 clients/client-evidently/src/commands/ListFeaturesCommand.ts create mode 100644 clients/client-evidently/src/commands/ListLaunchesCommand.ts create mode 100644 clients/client-evidently/src/commands/ListProjectsCommand.ts create mode 100644 clients/client-evidently/src/commands/ListTagsForResourceCommand.ts create mode 100644 clients/client-evidently/src/commands/PutProjectEventsCommand.ts create mode 100644 clients/client-evidently/src/commands/StartExperimentCommand.ts create mode 100644 clients/client-evidently/src/commands/StartLaunchCommand.ts create mode 100644 clients/client-evidently/src/commands/StopExperimentCommand.ts create mode 100644 clients/client-evidently/src/commands/StopLaunchCommand.ts create mode 100644 clients/client-evidently/src/commands/TagResourceCommand.ts create mode 100644 clients/client-evidently/src/commands/UntagResourceCommand.ts create mode 100644 clients/client-evidently/src/commands/UpdateExperimentCommand.ts create mode 100644 clients/client-evidently/src/commands/UpdateFeatureCommand.ts create mode 100644 clients/client-evidently/src/commands/UpdateLaunchCommand.ts create mode 100644 clients/client-evidently/src/commands/UpdateProjectCommand.ts create mode 100644 clients/client-evidently/src/commands/UpdateProjectDataDeliveryCommand.ts create mode 100644 clients/client-evidently/src/commands/index.ts create mode 100644 clients/client-evidently/src/endpoints.ts create mode 100644 clients/client-evidently/src/index.ts create mode 100644 clients/client-evidently/src/models/index.ts create mode 100644 clients/client-evidently/src/models/models_0.ts create mode 100644 clients/client-evidently/src/pagination/Interfaces.ts create mode 100644 clients/client-evidently/src/pagination/ListExperimentsPaginator.ts create mode 100644 clients/client-evidently/src/pagination/ListFeaturesPaginator.ts create mode 100644 
clients/client-evidently/src/pagination/ListLaunchesPaginator.ts create mode 100644 clients/client-evidently/src/pagination/ListProjectsPaginator.ts create mode 100644 clients/client-evidently/src/pagination/index.ts create mode 100644 clients/client-evidently/src/protocols/Aws_restJson1.ts create mode 100644 clients/client-evidently/src/runtimeConfig.browser.ts create mode 100644 clients/client-evidently/src/runtimeConfig.native.ts create mode 100644 clients/client-evidently/src/runtimeConfig.shared.ts create mode 100644 clients/client-evidently/src/runtimeConfig.ts create mode 100644 clients/client-evidently/tsconfig.es.json create mode 100644 clients/client-evidently/tsconfig.json create mode 100644 clients/client-evidently/tsconfig.types.json create mode 100644 clients/client-fsx/src/commands/CreateDataRepositoryAssociationCommand.ts create mode 100644 clients/client-fsx/src/commands/CreateSnapshotCommand.ts create mode 100644 clients/client-fsx/src/commands/DeleteDataRepositoryAssociationCommand.ts create mode 100644 clients/client-fsx/src/commands/DeleteSnapshotCommand.ts create mode 100644 clients/client-fsx/src/commands/DescribeDataRepositoryAssociationsCommand.ts create mode 100644 clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts create mode 100644 clients/client-fsx/src/commands/ReleaseFileSystemNfsV3LocksCommand.ts create mode 100644 clients/client-fsx/src/commands/RestoreVolumeFromSnapshotCommand.ts create mode 100644 clients/client-fsx/src/commands/UpdateDataRepositoryAssociationCommand.ts create mode 100644 clients/client-fsx/src/commands/UpdateSnapshotCommand.ts create mode 100644 clients/client-fsx/src/pagination/DescribeDataRepositoryAssociationsPaginator.ts create mode 100644 clients/client-fsx/src/pagination/DescribeSnapshotsPaginator.ts create mode 100644 clients/client-inspector2/.gitignore create mode 100644 clients/client-inspector2/LICENSE create mode 100644 clients/client-inspector2/README.md create mode 100644 clients/client-inspector2/jest.config.js create mode 100644 clients/client-inspector2/package.json create mode 100644 clients/client-inspector2/src/Inspector2.ts create mode 100644 clients/client-inspector2/src/Inspector2Client.ts create mode 100644 clients/client-inspector2/src/commands/AssociateMemberCommand.ts create mode 100644 clients/client-inspector2/src/commands/BatchGetAccountStatusCommand.ts create mode 100644 clients/client-inspector2/src/commands/BatchGetFreeTrialInfoCommand.ts create mode 100644 clients/client-inspector2/src/commands/CancelFindingsReportCommand.ts create mode 100644 clients/client-inspector2/src/commands/CreateFilterCommand.ts create mode 100644 clients/client-inspector2/src/commands/CreateFindingsReportCommand.ts create mode 100644 clients/client-inspector2/src/commands/DeleteFilterCommand.ts create mode 100644 clients/client-inspector2/src/commands/DescribeOrganizationConfigurationCommand.ts create mode 100644 clients/client-inspector2/src/commands/DisableCommand.ts create mode 100644 clients/client-inspector2/src/commands/DisableDelegatedAdminAccountCommand.ts create mode 100644 clients/client-inspector2/src/commands/DisassociateMemberCommand.ts create mode 100644 clients/client-inspector2/src/commands/EnableCommand.ts create mode 100644 clients/client-inspector2/src/commands/EnableDelegatedAdminAccountCommand.ts create mode 100644 clients/client-inspector2/src/commands/GetDelegatedAdminAccountCommand.ts create mode 100644 clients/client-inspector2/src/commands/GetFindingsReportStatusCommand.ts create mode 100644 
clients/client-inspector2/src/commands/GetMemberCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListAccountPermissionsCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListCoverageCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListCoverageStatisticsCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListDelegatedAdminAccountsCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListFiltersCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListFindingAggregationsCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListFindingsCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListMembersCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListTagsForResourceCommand.ts create mode 100644 clients/client-inspector2/src/commands/ListUsageTotalsCommand.ts create mode 100644 clients/client-inspector2/src/commands/TagResourceCommand.ts create mode 100644 clients/client-inspector2/src/commands/UntagResourceCommand.ts create mode 100644 clients/client-inspector2/src/commands/UpdateFilterCommand.ts create mode 100644 clients/client-inspector2/src/commands/UpdateOrganizationConfigurationCommand.ts create mode 100644 clients/client-inspector2/src/commands/index.ts create mode 100644 clients/client-inspector2/src/endpoints.ts create mode 100644 clients/client-inspector2/src/index.ts create mode 100644 clients/client-inspector2/src/models/index.ts create mode 100644 clients/client-inspector2/src/models/models_0.ts create mode 100644 clients/client-inspector2/src/pagination/Interfaces.ts create mode 100644 clients/client-inspector2/src/pagination/ListAccountPermissionsPaginator.ts create mode 100644 clients/client-inspector2/src/pagination/ListCoveragePaginator.ts create mode 100644 clients/client-inspector2/src/pagination/ListCoverageStatisticsPaginator.ts create mode 100644 clients/client-inspector2/src/pagination/ListDelegatedAdminAccountsPaginator.ts create mode 100644 clients/client-inspector2/src/pagination/ListFiltersPaginator.ts create mode 100644 clients/client-inspector2/src/pagination/ListFindingAggregationsPaginator.ts create mode 100644 clients/client-inspector2/src/pagination/ListFindingsPaginator.ts create mode 100644 clients/client-inspector2/src/pagination/ListMembersPaginator.ts create mode 100644 clients/client-inspector2/src/pagination/ListUsageTotalsPaginator.ts create mode 100644 clients/client-inspector2/src/pagination/index.ts create mode 100644 clients/client-inspector2/src/protocols/Aws_restJson1.ts create mode 100644 clients/client-inspector2/src/runtimeConfig.browser.ts create mode 100644 clients/client-inspector2/src/runtimeConfig.native.ts create mode 100644 clients/client-inspector2/src/runtimeConfig.shared.ts create mode 100644 clients/client-inspector2/src/runtimeConfig.ts create mode 100644 clients/client-inspector2/tsconfig.es.json create mode 100644 clients/client-inspector2/tsconfig.json create mode 100644 clients/client-inspector2/tsconfig.types.json create mode 100644 clients/client-iottwinmaker/.gitignore create mode 100644 clients/client-iottwinmaker/LICENSE create mode 100644 clients/client-iottwinmaker/README.md create mode 100644 clients/client-iottwinmaker/jest.config.js create mode 100644 clients/client-iottwinmaker/package.json create mode 100644 clients/client-iottwinmaker/src/IoTTwinMaker.ts create mode 100644 clients/client-iottwinmaker/src/IoTTwinMakerClient.ts create mode 100644 
clients/client-iottwinmaker/src/commands/BatchPutPropertyValuesCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/CreateComponentTypeCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/CreateEntityCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/CreateSceneCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/CreateWorkspaceCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/DeleteComponentTypeCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/DeleteEntityCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/DeleteSceneCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/DeleteWorkspaceCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/GetComponentTypeCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/GetEntityCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/GetPropertyValueCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/GetPropertyValueHistoryCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/GetSceneCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/GetWorkspaceCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/ListComponentTypesCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/ListEntitiesCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/ListScenesCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/ListTagsForResourceCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/ListWorkspacesCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/TagResourceCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/UntagResourceCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/UpdateComponentTypeCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/UpdateEntityCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/UpdateSceneCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/UpdateWorkspaceCommand.ts create mode 100644 clients/client-iottwinmaker/src/commands/index.ts create mode 100644 clients/client-iottwinmaker/src/endpoints.ts create mode 100644 clients/client-iottwinmaker/src/index.ts create mode 100644 clients/client-iottwinmaker/src/models/index.ts create mode 100644 clients/client-iottwinmaker/src/models/models_0.ts create mode 100644 clients/client-iottwinmaker/src/pagination/GetPropertyValueHistoryPaginator.ts create mode 100644 clients/client-iottwinmaker/src/pagination/Interfaces.ts create mode 100644 clients/client-iottwinmaker/src/pagination/ListComponentTypesPaginator.ts create mode 100644 clients/client-iottwinmaker/src/pagination/ListEntitiesPaginator.ts create mode 100644 clients/client-iottwinmaker/src/pagination/ListScenesPaginator.ts create mode 100644 clients/client-iottwinmaker/src/pagination/ListWorkspacesPaginator.ts create mode 100644 clients/client-iottwinmaker/src/pagination/index.ts create mode 100644 clients/client-iottwinmaker/src/protocols/Aws_restJson1.ts create mode 100644 clients/client-iottwinmaker/src/runtimeConfig.browser.ts create mode 100644 clients/client-iottwinmaker/src/runtimeConfig.native.ts create mode 100644 clients/client-iottwinmaker/src/runtimeConfig.shared.ts create mode 100644 clients/client-iottwinmaker/src/runtimeConfig.ts create mode 100644 
clients/client-iottwinmaker/tsconfig.es.json create mode 100644 clients/client-iottwinmaker/tsconfig.json create mode 100644 clients/client-iottwinmaker/tsconfig.types.json create mode 100644 clients/client-kafka/src/commands/CreateClusterV2Command.ts create mode 100644 clients/client-kafka/src/commands/DescribeClusterV2Command.ts create mode 100644 clients/client-kafka/src/commands/ListClustersV2Command.ts create mode 100644 clients/client-kafka/src/pagination/ListClustersV2Paginator.ts create mode 100644 clients/client-kinesis/src/commands/UpdateStreamModeCommand.ts create mode 100644 clients/client-lakeformation/src/commands/CancelTransactionCommand.ts create mode 100644 clients/client-lakeformation/src/commands/CommitTransactionCommand.ts create mode 100644 clients/client-lakeformation/src/commands/CreateDataCellsFilterCommand.ts create mode 100644 clients/client-lakeformation/src/commands/DeleteDataCellsFilterCommand.ts create mode 100644 clients/client-lakeformation/src/commands/DeleteObjectsOnCancelCommand.ts create mode 100644 clients/client-lakeformation/src/commands/DescribeTransactionCommand.ts create mode 100644 clients/client-lakeformation/src/commands/ExtendTransactionCommand.ts create mode 100644 clients/client-lakeformation/src/commands/GetQueryStateCommand.ts create mode 100644 clients/client-lakeformation/src/commands/GetQueryStatisticsCommand.ts create mode 100644 clients/client-lakeformation/src/commands/GetTableObjectsCommand.ts create mode 100644 clients/client-lakeformation/src/commands/GetWorkUnitResultsCommand.ts create mode 100644 clients/client-lakeformation/src/commands/GetWorkUnitsCommand.ts create mode 100644 clients/client-lakeformation/src/commands/ListDataCellsFilterCommand.ts create mode 100644 clients/client-lakeformation/src/commands/ListTableStorageOptimizersCommand.ts create mode 100644 clients/client-lakeformation/src/commands/ListTransactionsCommand.ts create mode 100644 clients/client-lakeformation/src/commands/StartQueryPlanningCommand.ts create mode 100644 clients/client-lakeformation/src/commands/StartTransactionCommand.ts create mode 100644 clients/client-lakeformation/src/commands/UpdateTableObjectsCommand.ts create mode 100644 clients/client-lakeformation/src/commands/UpdateTableStorageOptimizerCommand.ts create mode 100644 clients/client-lakeformation/src/pagination/GetTableObjectsPaginator.ts create mode 100644 clients/client-lakeformation/src/pagination/GetWorkUnitsPaginator.ts create mode 100644 clients/client-lakeformation/src/pagination/ListDataCellsFilterPaginator.ts create mode 100644 clients/client-lakeformation/src/pagination/ListLFTagsPaginator.ts create mode 100644 clients/client-lakeformation/src/pagination/ListTableStorageOptimizersPaginator.ts create mode 100644 clients/client-lakeformation/src/pagination/ListTransactionsPaginator.ts create mode 100644 clients/client-lakeformation/src/pagination/SearchDatabasesByLFTagsPaginator.ts create mode 100644 clients/client-lakeformation/src/pagination/SearchTablesByLFTagsPaginator.ts delete mode 100644 clients/client-lakeformation/src/protocols/Aws_json1_1.ts create mode 100644 clients/client-lakeformation/src/protocols/Aws_restJson1.ts create mode 100644 clients/client-rbin/.gitignore create mode 100644 clients/client-rbin/LICENSE create mode 100644 clients/client-rbin/README.md create mode 100644 clients/client-rbin/jest.config.js create mode 100644 clients/client-rbin/package.json create mode 100644 clients/client-rbin/src/Rbin.ts create mode 100644 
clients/client-rbin/src/RbinClient.ts create mode 100644 clients/client-rbin/src/commands/CreateRuleCommand.ts create mode 100644 clients/client-rbin/src/commands/DeleteRuleCommand.ts create mode 100644 clients/client-rbin/src/commands/GetRuleCommand.ts create mode 100644 clients/client-rbin/src/commands/ListRulesCommand.ts create mode 100644 clients/client-rbin/src/commands/ListTagsForResourceCommand.ts create mode 100644 clients/client-rbin/src/commands/TagResourceCommand.ts create mode 100644 clients/client-rbin/src/commands/UntagResourceCommand.ts create mode 100644 clients/client-rbin/src/commands/UpdateRuleCommand.ts create mode 100644 clients/client-rbin/src/commands/index.ts create mode 100644 clients/client-rbin/src/endpoints.ts create mode 100644 clients/client-rbin/src/index.ts create mode 100644 clients/client-rbin/src/models/index.ts create mode 100644 clients/client-rbin/src/models/models_0.ts create mode 100644 clients/client-rbin/src/pagination/Interfaces.ts create mode 100644 clients/client-rbin/src/pagination/ListRulesPaginator.ts create mode 100644 clients/client-rbin/src/pagination/index.ts create mode 100644 clients/client-rbin/src/protocols/Aws_restJson1.ts create mode 100644 clients/client-rbin/src/runtimeConfig.browser.ts create mode 100644 clients/client-rbin/src/runtimeConfig.native.ts create mode 100644 clients/client-rbin/src/runtimeConfig.shared.ts create mode 100644 clients/client-rbin/src/runtimeConfig.ts create mode 100644 clients/client-rbin/tsconfig.es.json create mode 100644 clients/client-rbin/tsconfig.json create mode 100644 clients/client-rbin/tsconfig.types.json create mode 100644 clients/client-rum/.gitignore create mode 100644 clients/client-rum/LICENSE create mode 100644 clients/client-rum/README.md create mode 100644 clients/client-rum/jest.config.js create mode 100644 clients/client-rum/package.json create mode 100644 clients/client-rum/src/RUM.ts create mode 100644 clients/client-rum/src/RUMClient.ts create mode 100644 clients/client-rum/src/commands/CreateAppMonitorCommand.ts create mode 100644 clients/client-rum/src/commands/DeleteAppMonitorCommand.ts create mode 100644 clients/client-rum/src/commands/GetAppMonitorCommand.ts create mode 100644 clients/client-rum/src/commands/GetAppMonitorDataCommand.ts create mode 100644 clients/client-rum/src/commands/ListAppMonitorsCommand.ts create mode 100644 clients/client-rum/src/commands/ListTagsForResourceCommand.ts create mode 100644 clients/client-rum/src/commands/PutRumEventsCommand.ts create mode 100644 clients/client-rum/src/commands/TagResourceCommand.ts create mode 100644 clients/client-rum/src/commands/UntagResourceCommand.ts create mode 100644 clients/client-rum/src/commands/UpdateAppMonitorCommand.ts create mode 100644 clients/client-rum/src/commands/index.ts create mode 100644 clients/client-rum/src/endpoints.ts create mode 100644 clients/client-rum/src/index.ts create mode 100644 clients/client-rum/src/models/index.ts create mode 100644 clients/client-rum/src/models/models_0.ts create mode 100644 clients/client-rum/src/pagination/GetAppMonitorDataPaginator.ts create mode 100644 clients/client-rum/src/pagination/Interfaces.ts create mode 100644 clients/client-rum/src/pagination/ListAppMonitorsPaginator.ts create mode 100644 clients/client-rum/src/pagination/index.ts create mode 100644 clients/client-rum/src/protocols/Aws_restJson1.ts create mode 100644 clients/client-rum/src/runtimeConfig.browser.ts create mode 100644 clients/client-rum/src/runtimeConfig.native.ts create mode 100644 
clients/client-rum/src/runtimeConfig.shared.ts create mode 100644 clients/client-rum/src/runtimeConfig.ts create mode 100644 clients/client-rum/tsconfig.es.json create mode 100644 clients/client-rum/tsconfig.json create mode 100644 clients/client-rum/tsconfig.types.json create mode 100644 clients/client-wellarchitected/src/commands/CreateLensShareCommand.ts create mode 100644 clients/client-wellarchitected/src/commands/CreateLensVersionCommand.ts create mode 100644 clients/client-wellarchitected/src/commands/DeleteLensCommand.ts create mode 100644 clients/client-wellarchitected/src/commands/DeleteLensShareCommand.ts create mode 100644 clients/client-wellarchitected/src/commands/ExportLensCommand.ts create mode 100644 clients/client-wellarchitected/src/commands/GetLensCommand.ts create mode 100644 clients/client-wellarchitected/src/commands/ImportLensCommand.ts create mode 100644 clients/client-wellarchitected/src/commands/ListLensSharesCommand.ts create mode 100644 clients/client-wellarchitected/src/pagination/ListLensSharesPaginator.ts create mode 100644 clients/client-workspaces-web/.gitignore create mode 100644 clients/client-workspaces-web/LICENSE create mode 100644 clients/client-workspaces-web/README.md create mode 100644 clients/client-workspaces-web/jest.config.js create mode 100644 clients/client-workspaces-web/package.json create mode 100644 clients/client-workspaces-web/src/WorkSpacesWeb.ts create mode 100644 clients/client-workspaces-web/src/WorkSpacesWebClient.ts create mode 100644 clients/client-workspaces-web/src/commands/AssociateBrowserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/AssociateNetworkSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/AssociateTrustStoreCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/AssociateUserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/CreateBrowserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/CreateIdentityProviderCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/CreateNetworkSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/CreatePortalCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/CreateTrustStoreCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/CreateUserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DeleteBrowserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DeleteIdentityProviderCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DeleteNetworkSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DeletePortalCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DeleteTrustStoreCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DeleteUserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DisassociateBrowserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DisassociateNetworkSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DisassociateTrustStoreCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/DisassociateUserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/GetBrowserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/GetIdentityProviderCommand.ts 
create mode 100644 clients/client-workspaces-web/src/commands/GetNetworkSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/GetPortalCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/GetPortalServiceProviderMetadataCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/GetTrustStoreCertificateCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/GetTrustStoreCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/GetUserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/ListBrowserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/ListIdentityProvidersCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/ListNetworkSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/ListPortalsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/ListTagsForResourceCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/ListTrustStoreCertificatesCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/ListTrustStoresCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/ListUserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/TagResourceCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/UntagResourceCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/UpdateBrowserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/UpdateIdentityProviderCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/UpdateNetworkSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/UpdatePortalCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/UpdateTrustStoreCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/UpdateUserSettingsCommand.ts create mode 100644 clients/client-workspaces-web/src/commands/index.ts create mode 100644 clients/client-workspaces-web/src/endpoints.ts create mode 100644 clients/client-workspaces-web/src/index.ts create mode 100644 clients/client-workspaces-web/src/models/index.ts create mode 100644 clients/client-workspaces-web/src/models/models_0.ts create mode 100644 clients/client-workspaces-web/src/pagination/Interfaces.ts create mode 100644 clients/client-workspaces-web/src/pagination/ListBrowserSettingsPaginator.ts create mode 100644 clients/client-workspaces-web/src/pagination/ListIdentityProvidersPaginator.ts create mode 100644 clients/client-workspaces-web/src/pagination/ListNetworkSettingsPaginator.ts create mode 100644 clients/client-workspaces-web/src/pagination/ListPortalsPaginator.ts create mode 100644 clients/client-workspaces-web/src/pagination/ListTrustStoreCertificatesPaginator.ts create mode 100644 clients/client-workspaces-web/src/pagination/ListTrustStoresPaginator.ts create mode 100644 clients/client-workspaces-web/src/pagination/ListUserSettingsPaginator.ts create mode 100644 clients/client-workspaces-web/src/pagination/index.ts create mode 100644 clients/client-workspaces-web/src/protocols/Aws_restJson1.ts create mode 100644 clients/client-workspaces-web/src/runtimeConfig.browser.ts create mode 100644 clients/client-workspaces-web/src/runtimeConfig.native.ts create mode 100644 clients/client-workspaces-web/src/runtimeConfig.shared.ts create mode 100644 clients/client-workspaces-web/src/runtimeConfig.ts create mode 100644 
clients/client-workspaces-web/tsconfig.es.json create mode 100644 clients/client-workspaces-web/tsconfig.json create mode 100644 clients/client-workspaces-web/tsconfig.types.json create mode 100644 codegen/sdk-codegen/aws-models/backup-gateway.json create mode 100644 codegen/sdk-codegen/aws-models/evidently.json create mode 100644 codegen/sdk-codegen/aws-models/inspector2.json create mode 100644 codegen/sdk-codegen/aws-models/iottwinmaker.json create mode 100644 codegen/sdk-codegen/aws-models/rbin.json create mode 100644 codegen/sdk-codegen/aws-models/rum.json create mode 100644 codegen/sdk-codegen/aws-models/workspaces-web.json diff --git a/clients/client-accessanalyzer/src/models/models_0.ts b/clients/client-accessanalyzer/src/models/models_0.ts index 57a326a074ef..fd538b054901 100644 --- a/clients/client-accessanalyzer/src/models/models_0.ts +++ b/clients/client-accessanalyzer/src/models/models_0.ts @@ -1321,14 +1321,14 @@ export namespace S3BucketConfiguration { * specify the policy, the access preview assumes a secret without a policy. To propose * deletion of an existing policy, you can specify an empty string. If the proposed * configuration is for a new secret and you do not specify the KMS key ID, the access - * preview uses the default CMK of the Amazon Web Services account. If you specify an empty string for the - * KMS key ID, the access preview uses the default CMK of the Amazon Web Services account. For more - * information about secret policy limits, see Quotas for + * preview uses the Amazon Web Services managed key aws/secretsmanager. If you specify an empty + * string for the KMS key ID, the access preview uses the Amazon Web Services managed key of the Amazon Web Services + * account. For more information about secret policy limits, see Quotas for * Secrets Manager..

 */
export interface SecretsManagerSecretConfiguration {
  /**
-   * The proposed ARN, key ID, or alias of the KMS customer master key (CMK).
+   * The proposed ARN, key ID, or alias of the KMS key.
*/ kmsKeyId?: string; @@ -3273,6 +3273,13 @@ export enum PolicyType { SERVICE_CONTROL_POLICY = "SERVICE_CONTROL_POLICY", } +export enum ValidatePolicyResourceType { + S3_ACCESS_POINT = "AWS::S3::AccessPoint", + S3_BUCKET = "AWS::S3::Bucket", + S3_MULTI_REGION_ACCESS_POINT = "AWS::S3::MultiRegionAccessPoint", + S3_OBJECT_LAMBDA_ACCESS_POINT = "AWS::S3ObjectLambda::AccessPoint", +} + export interface ValidatePolicyRequest { /** *

The locale to use for localizing the findings.

@@ -3305,6 +3312,18 @@ export interface ValidatePolicyRequest { * or Amazon S3 bucket policy.

 */
  policyType: PolicyType | string | undefined;
+
+  /**
+   * The type of resource to attach to your resource policy. Specify a value for the policy
+   * validation resource type only if the policy type is RESOURCE_POLICY. For
+   * example, to validate a resource policy to attach to an Amazon S3 bucket, you can choose
+   * AWS::S3::Bucket for the policy validation resource type.
+   *
+   * For resource types not supported as valid values, IAM Access Analyzer runs policy checks that
+   * apply to all resource policies. For example, to validate a resource policy to attach to a
+   * KMS key, do not specify a value for the policy validation resource type and IAM Access Analyzer
+   * will run policy checks that apply to all resource policies.
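For illustration (an editor's sketch, not part of the generated diff), the new field is supplied through the existing `ValidatePolicyCommand`. The `ValidatePolicyResourceType` enum values come from the declaration added above; the client setup and the sample bucket policy are assumptions.

```ts
import {
  AccessAnalyzerClient,
  ValidatePolicyCommand,
  ValidatePolicyResourceType,
} from "@aws-sdk/client-accessanalyzer";

const client = new AccessAnalyzerClient({ region: "us-east-1" });

// Validate a resource policy that will be attached to an S3 bucket.
// The policy document below is a placeholder for illustration only.
const response = await client.send(
  new ValidatePolicyCommand({
    policyType: "RESOURCE_POLICY",
    validatePolicyResourceType: ValidatePolicyResourceType.S3_BUCKET, // "AWS::S3::Bucket"
    policyDocument: JSON.stringify({
      Version: "2012-10-17",
      Statement: [
        {
          Effect: "Allow",
          Principal: { AWS: "arn:aws:iam::111122223333:root" },
          Action: "s3:GetObject",
          Resource: "arn:aws:s3:::example-bucket/*",
        },
      ],
    }),
  })
);

// Each finding describes a validation issue with the proposed policy.
console.log(response.findings);
```

Omitting `validatePolicyResourceType` keeps the previous behavior, where only the checks that apply to all resource policies are run.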

+ */ + validatePolicyResourceType?: ValidatePolicyResourceType | string; } export namespace ValidatePolicyRequest { diff --git a/clients/client-accessanalyzer/src/protocols/Aws_restJson1.ts b/clients/client-accessanalyzer/src/protocols/Aws_restJson1.ts index 95ef184b40fc..5b05abeb388d 100644 --- a/clients/client-accessanalyzer/src/protocols/Aws_restJson1.ts +++ b/clients/client-accessanalyzer/src/protocols/Aws_restJson1.ts @@ -1002,6 +1002,8 @@ export const serializeAws_restJson1ValidatePolicyCommand = async ( ...(input.policyDocument !== undefined && input.policyDocument !== null && { policyDocument: input.policyDocument }), ...(input.policyType !== undefined && input.policyType !== null && { policyType: input.policyType }), + ...(input.validatePolicyResourceType !== undefined && + input.validatePolicyResourceType !== null && { validatePolicyResourceType: input.validatePolicyResourceType }), }); return new __HttpRequest({ protocol, diff --git a/clients/client-backup-gateway/.gitignore b/clients/client-backup-gateway/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-backup-gateway/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-backup-gateway/LICENSE b/clients/client-backup-gateway/LICENSE new file mode 100644 index 000000000000..f9e0c8672bca --- /dev/null +++ b/clients/client-backup-gateway/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/clients/client-backup-gateway/README.md b/clients/client-backup-gateway/README.md new file mode 100644 index 000000000000..c7d0267e482f --- /dev/null +++ b/clients/client-backup-gateway/README.md @@ -0,0 +1,212 @@ +# @aws-sdk/client-backup-gateway + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-backup-gateway/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-backup-gateway) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-backup-gateway.svg)](https://www.npmjs.com/package/@aws-sdk/client-backup-gateway) + +## Description + +AWS SDK for JavaScript BackupGateway Client for Node.js, Browser and React Native. + +Backup gateway + +

Backup gateway connects Backup to your hypervisor, so you can +create, store, and restore backups of your virtual machines (VMs) anywhere, whether +on-premises or in the VMware Cloud (VMC) on Amazon Web Services.

+

Add on-premises resources by connecting to a hypervisor through a gateway. Backup will automatically discover the resources in your hypervisor.

+

Use Backup to assign virtual or on-premises resources to a backup plan, or run +on-demand backups. Once you have backed up your resources, you can view them and restore them +like any resource supported by Backup.

+

To download the Amazon Web Services software to get started, navigate to the Backup console, choose Gateways, then choose Create gateway.

+ +## Installing + +To install the this package, simply type add or install @aws-sdk/client-backup-gateway +using your favorite package manager: + +- `npm install @aws-sdk/client-backup-gateway` +- `yarn add @aws-sdk/client-backup-gateway` +- `pnpm add @aws-sdk/client-backup-gateway` + +## Getting Started + +### Import + +The AWS SDK is modulized by clients and commands. +To send a request, you only need to import the `BackupGatewayClient` and +the commands you need, for example `AssociateGatewayToServerCommand`: + +```js +// ES5 example +const { BackupGatewayClient, AssociateGatewayToServerCommand } = require("@aws-sdk/client-backup-gateway"); +``` + +```ts +// ES6+ example +import { BackupGatewayClient, AssociateGatewayToServerCommand } from "@aws-sdk/client-backup-gateway"; +``` + +### Usage + +To send a request, you: + +- Initiate client with configuration (e.g. credentials, region). +- Initiate command with input parameters. +- Call `send` operation on client with command object as input. +- If you are using a custom http handler, you may call `destroy()` to close open connections. + +```js +// a client can be shared by different commands. +const client = new BackupGatewayClient({ region: "REGION" }); + +const params = { + /** input parameters */ +}; +const command = new AssociateGatewayToServerCommand(params); +``` + +#### Async/await + +We recommend using [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await) +operator to wait for the promise returned by send operation as follows: + +```js +// async/await. +try { + const data = await client.send(command); + // process data. +} catch (error) { + // error handling. +} finally { + // finally. +} +``` + +Async-await is clean, concise, intuitive, easy to debug and has better error handling +as compared to using Promise chains or callbacks. + +#### Promises + +You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining) +to execute send operation. + +```js +client.send(command).then( + (data) => { + // process data. + }, + (error) => { + // error handling. + } +); +``` + +Promises can also be called using `.catch()` and `.finally()` as follows: + +```js +client + .send(command) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }) + .finally(() => { + // finally. + }); +``` + +#### Callbacks + +We do not recommend using callbacks because of [callback hell](http://callbackhell.com/), +but they are supported by the send operation. + +```js +// callbacks. +client.send(command, (err, data) => { + // proccess err and data. +}); +``` + +#### v2 compatible style + +The client can also send requests using v2 compatible style. +However, it results in a bigger bundle size and may be dropped in next major version. More details in the blog post +on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/) + +```ts +import * as AWS from "@aws-sdk/client-backup-gateway"; +const client = new AWS.BackupGateway({ region: "REGION" }); + +// async/await. +try { + const data = await client.associateGatewayToServer(params); + // process data. +} catch (error) { + // error handling. +} + +// Promises. +client + .associateGatewayToServer(params) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }); + +// callbacks. +client.associateGatewayToServer(params, (err, data) => { + // proccess err and data. 
+}); +``` + +### Troubleshooting + +When the service returns an exception, the error will include the exception information, +as well as response metadata (e.g. request id). + +```js +try { + const data = await client.send(command); + // process data. +} catch (error) { + const { requestId, cfId, extendedRequestId } = error.$metadata; + console.log({ requestId, cfId, extendedRequestId }); + /** + * The keys within exceptions are also parsed. + * You can access them by specifying exception names: + * if (error.name === 'SomeServiceException') { + * const value = error.specialKeyInException; + * } + */ +} +``` + +## Getting Help + +Please use these community resources for getting help. +We use the GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them. + +- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html) + or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html). +- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/) + on AWS Developer Blog. +- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`. +- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3). +- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose). + +To test your universal JavaScript code in Node.js, browser and react-native environments, +visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests). + +## Contributing + +This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-backup-gateway` package is updated. +To contribute to client you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE for more information. 
diff --git a/clients/client-backup-gateway/jest.config.js b/clients/client-backup-gateway/jest.config.js new file mode 100644 index 000000000000..02eed352c6a8 --- /dev/null +++ b/clients/client-backup-gateway/jest.config.js @@ -0,0 +1,4 @@ +module.exports = { + preset: "ts-jest", + testMatch: ["**/*.spec.ts", "!**/*.browser.spec.ts", "!**/*.integ.spec.ts"], +}; diff --git a/clients/client-backup-gateway/package.json b/clients/client-backup-gateway/package.json new file mode 100644 index 000000000000..724c72a8f746 --- /dev/null +++ b/clients/client-backup-gateway/package.json @@ -0,0 +1,94 @@ +{ + "name": "@aws-sdk/client-backup-gateway", + "description": "AWS SDK for JavaScript Backup Gateway Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "yarn build:cjs && yarn build:es && yarn build:types", + "build:cjs": "tsc -p tsconfig.json", + "build:docs": "yarn clean:docs && typedoc ./", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "clean": "yarn clean:dist && yarn clean:docs", + "clean:dist": "rimraf ./dist-*", + "clean:docs": "rimraf ./docs", + "downlevel-dts": "downlevel-dts dist-types dist-types/ts3.4", + "test": "jest --coverage --passWithNoTests" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "3.43.0", + "@aws-sdk/config-resolver": "3.40.0", + "@aws-sdk/credential-provider-node": "3.41.0", + "@aws-sdk/fetch-http-handler": "3.40.0", + "@aws-sdk/hash-node": "3.40.0", + "@aws-sdk/invalid-dependency": "3.40.0", + "@aws-sdk/middleware-content-length": "3.40.0", + "@aws-sdk/middleware-host-header": "3.40.0", + "@aws-sdk/middleware-logger": "3.40.0", + "@aws-sdk/middleware-retry": "3.40.0", + "@aws-sdk/middleware-serde": "3.40.0", + "@aws-sdk/middleware-signing": "3.40.0", + "@aws-sdk/middleware-stack": "3.40.0", + "@aws-sdk/middleware-user-agent": "3.40.0", + "@aws-sdk/node-config-provider": "3.40.0", + "@aws-sdk/node-http-handler": "3.40.0", + "@aws-sdk/protocol-http": "3.40.0", + "@aws-sdk/smithy-client": "3.41.0", + "@aws-sdk/types": "3.40.0", + "@aws-sdk/url-parser": "3.40.0", + "@aws-sdk/util-base64-browser": "3.37.0", + "@aws-sdk/util-base64-node": "3.37.0", + "@aws-sdk/util-body-length-browser": "3.37.0", + "@aws-sdk/util-body-length-node": "3.37.0", + "@aws-sdk/util-user-agent-browser": "3.40.0", + "@aws-sdk/util-user-agent-node": "3.40.0", + "@aws-sdk/util-utf8-browser": "3.37.0", + "@aws-sdk/util-utf8-node": "3.37.0", + "tslib": "^2.3.0" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "3.38.0", + "@types/node": "^12.7.5", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-backup-gateway", + "repository": { + "type": "git", + "url": 
"https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-backup-gateway" + } +} diff --git a/clients/client-backup-gateway/src/BackupGateway.ts b/clients/client-backup-gateway/src/BackupGateway.ts new file mode 100644 index 000000000000..4a5451528e44 --- /dev/null +++ b/clients/client-backup-gateway/src/BackupGateway.ts @@ -0,0 +1,603 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { BackupGatewayClient } from "./BackupGatewayClient"; +import { + AssociateGatewayToServerCommand, + AssociateGatewayToServerCommandInput, + AssociateGatewayToServerCommandOutput, +} from "./commands/AssociateGatewayToServerCommand"; +import { + CreateGatewayCommand, + CreateGatewayCommandInput, + CreateGatewayCommandOutput, +} from "./commands/CreateGatewayCommand"; +import { + DeleteGatewayCommand, + DeleteGatewayCommandInput, + DeleteGatewayCommandOutput, +} from "./commands/DeleteGatewayCommand"; +import { + DeleteHypervisorCommand, + DeleteHypervisorCommandInput, + DeleteHypervisorCommandOutput, +} from "./commands/DeleteHypervisorCommand"; +import { + DisassociateGatewayFromServerCommand, + DisassociateGatewayFromServerCommandInput, + DisassociateGatewayFromServerCommandOutput, +} from "./commands/DisassociateGatewayFromServerCommand"; +import { + ImportHypervisorConfigurationCommand, + ImportHypervisorConfigurationCommandInput, + ImportHypervisorConfigurationCommandOutput, +} from "./commands/ImportHypervisorConfigurationCommand"; +import { + ListGatewaysCommand, + ListGatewaysCommandInput, + ListGatewaysCommandOutput, +} from "./commands/ListGatewaysCommand"; +import { + ListHypervisorsCommand, + ListHypervisorsCommandInput, + ListHypervisorsCommandOutput, +} from "./commands/ListHypervisorsCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + ListVirtualMachinesCommand, + ListVirtualMachinesCommandInput, + ListVirtualMachinesCommandOutput, +} from "./commands/ListVirtualMachinesCommand"; +import { + PutMaintenanceStartTimeCommand, + PutMaintenanceStartTimeCommandInput, + PutMaintenanceStartTimeCommandOutput, +} from "./commands/PutMaintenanceStartTimeCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + TestHypervisorConfigurationCommand, + TestHypervisorConfigurationCommandInput, + TestHypervisorConfigurationCommandOutput, +} from "./commands/TestHypervisorConfigurationCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { + UpdateGatewayInformationCommand, + UpdateGatewayInformationCommandInput, + UpdateGatewayInformationCommandOutput, +} from "./commands/UpdateGatewayInformationCommand"; +import { + UpdateHypervisorCommand, + UpdateHypervisorCommandInput, + UpdateHypervisorCommandOutput, +} from "./commands/UpdateHypervisorCommand"; + +/** + * Backup gateway + *

Backup gateway connects Backup to your hypervisor, so you can + * create, store, and restore backups of your virtual machines (VMs) anywhere, whether + * on-premises or in the VMware Cloud (VMC) on Amazon Web Services.

+ *

Add on-premises resources by connecting to a hypervisor through a gateway. Backup will automatically discover the resources in your hypervisor.

+ *

Use Backup to assign virtual or on-premises resources to a backup plan, or run + * on-demand backups. Once you have backed up your resources, you can view them and restore them + * like any resource supported by Backup.

+ *

To download the Amazon Web Services software to get started, navigate to the Backup console, choose Gateways, then choose Create gateway.

+ */ +export class BackupGateway extends BackupGatewayClient { + /** + *

Associates a backup gateway with your server. After you complete the association process, + * you can back up and restore your VMs through the gateway.
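As an illustrative sketch, the call follows the same client/command pattern as the README. The input and output field names (`GatewayArn`, `ServerHostname`) are assumptions based on the Backup gateway API and are not shown in this diff.

```ts
import { BackupGatewayClient, AssociateGatewayToServerCommand } from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

// GatewayArn and ServerHostname are assumed field names; check the generated models for the exact shape.
const response = await client.send(
  new AssociateGatewayToServerCommand({
    GatewayArn: "arn:aws:backup-gateway:us-east-1:111122223333:gateway/bgw-EXAMPLE",
    ServerHostname: "vcenter.example.com",
  })
);
console.log(response); // expected to echo the gateway ARN on success
```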

+ */ + public associateGatewayToServer( + args: AssociateGatewayToServerCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public associateGatewayToServer( + args: AssociateGatewayToServerCommandInput, + cb: (err: any, data?: AssociateGatewayToServerCommandOutput) => void + ): void; + public associateGatewayToServer( + args: AssociateGatewayToServerCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AssociateGatewayToServerCommandOutput) => void + ): void; + public associateGatewayToServer( + args: AssociateGatewayToServerCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: AssociateGatewayToServerCommandOutput) => void), + cb?: (err: any, data?: AssociateGatewayToServerCommandOutput) => void + ): Promise | void { + const command = new AssociateGatewayToServerCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a backup gateway. After you create a gateway, you can associate it with a server + * using the AssociateGatewayToServer operation.
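A hedged sketch of gateway creation; `ActivationKey`, `GatewayDisplayName`, `GatewayType`, and the `"BACKUP_VM"` value are assumptions, not taken from this diff.

```ts
import { BackupGatewayClient, CreateGatewayCommand } from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

// Assumed input fields: the activation key obtained after deploying the gateway appliance,
// a display name, and the gateway type for VMware backups.
const { GatewayArn } = await client.send(
  new CreateGatewayCommand({
    ActivationKey: "EXAMPLE-ACTIVATION-KEY",
    GatewayDisplayName: "my-backup-gateway",
    GatewayType: "BACKUP_VM",
  })
);

// The returned ARN (assumed output field) is what AssociateGatewayToServerCommand expects.
console.log(GatewayArn);
```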

+ */ + public createGateway( + args: CreateGatewayCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createGateway( + args: CreateGatewayCommandInput, + cb: (err: any, data?: CreateGatewayCommandOutput) => void + ): void; + public createGateway( + args: CreateGatewayCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateGatewayCommandOutput) => void + ): void; + public createGateway( + args: CreateGatewayCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateGatewayCommandOutput) => void), + cb?: (err: any, data?: CreateGatewayCommandOutput) => void + ): Promise | void { + const command = new CreateGatewayCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Deletes a backup gateway.

+ */ + public deleteGateway( + args: DeleteGatewayCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteGateway( + args: DeleteGatewayCommandInput, + cb: (err: any, data?: DeleteGatewayCommandOutput) => void + ): void; + public deleteGateway( + args: DeleteGatewayCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteGatewayCommandOutput) => void + ): void; + public deleteGateway( + args: DeleteGatewayCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteGatewayCommandOutput) => void), + cb?: (err: any, data?: DeleteGatewayCommandOutput) => void + ): Promise | void { + const command = new DeleteGatewayCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Deletes a hypervisor.

+ */ + public deleteHypervisor( + args: DeleteHypervisorCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteHypervisor( + args: DeleteHypervisorCommandInput, + cb: (err: any, data?: DeleteHypervisorCommandOutput) => void + ): void; + public deleteHypervisor( + args: DeleteHypervisorCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteHypervisorCommandOutput) => void + ): void; + public deleteHypervisor( + args: DeleteHypervisorCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteHypervisorCommandOutput) => void), + cb?: (err: any, data?: DeleteHypervisorCommandOutput) => void + ): Promise | void { + const command = new DeleteHypervisorCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Disassociates a backup gateway from the specified server. After the disassociation process + * finishes, the gateway can no longer access the virtual machines on the server.
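A tear-down sketch combining this operation with `DeleteGatewayCommand`; the `GatewayArn` field name and the ordering (disassociate before delete) are assumptions.

```ts
import {
  BackupGatewayClient,
  DisassociateGatewayFromServerCommand,
  DeleteGatewayCommand,
} from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });
const gatewayArn = "arn:aws:backup-gateway:us-east-1:111122223333:gateway/bgw-EXAMPLE";

// Assumed flow: detach the gateway from its server first, then delete the gateway itself.
await client.send(new DisassociateGatewayFromServerCommand({ GatewayArn: gatewayArn }));
await client.send(new DeleteGatewayCommand({ GatewayArn: gatewayArn }));
```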

+ */ + public disassociateGatewayFromServer( + args: DisassociateGatewayFromServerCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disassociateGatewayFromServer( + args: DisassociateGatewayFromServerCommandInput, + cb: (err: any, data?: DisassociateGatewayFromServerCommandOutput) => void + ): void; + public disassociateGatewayFromServer( + args: DisassociateGatewayFromServerCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisassociateGatewayFromServerCommandOutput) => void + ): void; + public disassociateGatewayFromServer( + args: DisassociateGatewayFromServerCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisassociateGatewayFromServerCommandOutput) => void), + cb?: (err: any, data?: DisassociateGatewayFromServerCommandOutput) => void + ): Promise | void { + const command = new DisassociateGatewayFromServerCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Connects to a hypervisor by importing its configuration.
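A sketch of registering a hypervisor; `Name`, `Host`, `Username`, and `Password` are assumed field names, and real credentials should come from a secret store rather than literals.

```ts
import { BackupGatewayClient, ImportHypervisorConfigurationCommand } from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

// Assumed input shape for the hypervisor configuration.
const { HypervisorArn } = await client.send(
  new ImportHypervisorConfigurationCommand({
    Name: "vcenter-prod",
    Host: "vcenter.example.com",
    Username: "backup-svc",
    Password: "EXAMPLE-PASSWORD",
  })
);
console.log(HypervisorArn); // assumed output field
```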

+ */ + public importHypervisorConfiguration( + args: ImportHypervisorConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public importHypervisorConfiguration( + args: ImportHypervisorConfigurationCommandInput, + cb: (err: any, data?: ImportHypervisorConfigurationCommandOutput) => void + ): void; + public importHypervisorConfiguration( + args: ImportHypervisorConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ImportHypervisorConfigurationCommandOutput) => void + ): void; + public importHypervisorConfiguration( + args: ImportHypervisorConfigurationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ImportHypervisorConfigurationCommandOutput) => void), + cb?: (err: any, data?: ImportHypervisorConfigurationCommandOutput) => void + ): Promise | void { + const command = new ImportHypervisorConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists backup gateways owned by an Amazon Web Services account in an Amazon Web Services Region. The returned list is ordered by gateway Amazon Resource Name (ARN).
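This change also adds `ListGatewaysPaginator.ts`; by the SDK's `paginateXxx` convention the helper should be exported as `paginateListGateways` (assumed here, as are the `MaxResults` input and `Gateways` output fields).

```ts
import {
  BackupGatewayClient,
  ListGatewaysCommand,
  paginateListGateways, // assumed export name, following the SDK's paginator convention
} from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

// Single page (MaxResults and Gateways are assumed field names):
const firstPage = await client.send(new ListGatewaysCommand({ MaxResults: 10 }));
console.log(firstPage.Gateways);

// All pages via the async-iterable paginator:
for await (const page of paginateListGateways({ client }, {})) {
  for (const gateway of page.Gateways ?? []) {
    console.log(gateway.GatewayArn); // assumed member field
  }
}
```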

+ */ + public listGateways( + args: ListGatewaysCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listGateways(args: ListGatewaysCommandInput, cb: (err: any, data?: ListGatewaysCommandOutput) => void): void; + public listGateways( + args: ListGatewaysCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListGatewaysCommandOutput) => void + ): void; + public listGateways( + args: ListGatewaysCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListGatewaysCommandOutput) => void), + cb?: (err: any, data?: ListGatewaysCommandOutput) => void + ): Promise | void { + const command = new ListGatewaysCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists your hypervisors.

+ */ + public listHypervisors( + args: ListHypervisorsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listHypervisors( + args: ListHypervisorsCommandInput, + cb: (err: any, data?: ListHypervisorsCommandOutput) => void + ): void; + public listHypervisors( + args: ListHypervisorsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListHypervisorsCommandOutput) => void + ): void; + public listHypervisors( + args: ListHypervisorsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListHypervisorsCommandOutput) => void), + cb?: (err: any, data?: ListHypervisorsCommandOutput) => void + ): Promise | void { + const command = new ListHypervisorsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists the tags applied to the resource identified by its Amazon Resource Name + * (ARN).

+ */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTagsForResourceCommandOutput) => void), + cb?: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): Promise | void { + const command = new ListTagsForResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists your virtual machines.

+ */ + public listVirtualMachines( + args: ListVirtualMachinesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listVirtualMachines( + args: ListVirtualMachinesCommandInput, + cb: (err: any, data?: ListVirtualMachinesCommandOutput) => void + ): void; + public listVirtualMachines( + args: ListVirtualMachinesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListVirtualMachinesCommandOutput) => void + ): void; + public listVirtualMachines( + args: ListVirtualMachinesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListVirtualMachinesCommandOutput) => void), + cb?: (err: any, data?: ListVirtualMachinesCommandOutput) => void + ): Promise | void { + const command = new ListVirtualMachinesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Sets the maintenance start time for a gateway.
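A sketch of scheduling weekly maintenance; the field names (`GatewayArn`, `HourOfDay`, `MinuteOfHour`, `DayOfWeek`) and the day-of-week encoding are assumptions.

```ts
import { BackupGatewayClient, PutMaintenanceStartTimeCommand } from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

// Assumed fields; here 0 is taken to mean Sunday, so maintenance starts Sundays at 02:30.
await client.send(
  new PutMaintenanceStartTimeCommand({
    GatewayArn: "arn:aws:backup-gateway:us-east-1:111122223333:gateway/bgw-EXAMPLE",
    HourOfDay: 2,
    MinuteOfHour: 30,
    DayOfWeek: 0,
  })
);
```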

+ */ + public putMaintenanceStartTime( + args: PutMaintenanceStartTimeCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public putMaintenanceStartTime( + args: PutMaintenanceStartTimeCommandInput, + cb: (err: any, data?: PutMaintenanceStartTimeCommandOutput) => void + ): void; + public putMaintenanceStartTime( + args: PutMaintenanceStartTimeCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: PutMaintenanceStartTimeCommandOutput) => void + ): void; + public putMaintenanceStartTime( + args: PutMaintenanceStartTimeCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: PutMaintenanceStartTimeCommandOutput) => void), + cb?: (err: any, data?: PutMaintenanceStartTimeCommandOutput) => void + ): Promise | void { + const command = new PutMaintenanceStartTimeCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Tags the resource.
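A tagging sketch; `ResourceARN` and the `Tags` list of `{ Key, Value }` pairs are assumed to follow the shape used by other Backup APIs.

```ts
import { BackupGatewayClient, TagResourceCommand } from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

// Assumed input shape for tagging a gateway or hypervisor by ARN.
await client.send(
  new TagResourceCommand({
    ResourceARN: "arn:aws:backup-gateway:us-east-1:111122223333:gateway/bgw-EXAMPLE",
    Tags: [{ Key: "team", Value: "storage" }],
  })
);
```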

+ */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise; + public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void; + public tagResource( + args: TagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TagResourceCommandOutput) => void + ): void; + public tagResource( + args: TagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TagResourceCommandOutput) => void), + cb?: (err: any, data?: TagResourceCommandOutput) => void + ): Promise | void { + const command = new TagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Tests your hypervisor configuration to validate that the backup gateway can connect with the + * hypervisor and its resources.

+ */ + public testHypervisorConfiguration( + args: TestHypervisorConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public testHypervisorConfiguration( + args: TestHypervisorConfigurationCommandInput, + cb: (err: any, data?: TestHypervisorConfigurationCommandOutput) => void + ): void; + public testHypervisorConfiguration( + args: TestHypervisorConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TestHypervisorConfigurationCommandOutput) => void + ): void; + public testHypervisorConfiguration( + args: TestHypervisorConfigurationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TestHypervisorConfigurationCommandOutput) => void), + cb?: (err: any, data?: TestHypervisorConfigurationCommandOutput) => void + ): Promise | void { + const command = new TestHypervisorConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Removes tags from the resource.

+ */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public untagResource( + args: UntagResourceCommandInput, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UntagResourceCommandOutput) => void), + cb?: (err: any, data?: UntagResourceCommandOutput) => void + ): Promise | void { + const command = new UntagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Updates a gateway's name. Specify which gateway to update using the Amazon Resource Name + * (ARN) of the gateway in your request.
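A rename sketch using the promise-chaining style from the README; `GatewayArn` and `GatewayDisplayName` are assumed field names.

```ts
import { BackupGatewayClient, UpdateGatewayInformationCommand } from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

client
  .send(
    new UpdateGatewayInformationCommand({
      GatewayArn: "arn:aws:backup-gateway:us-east-1:111122223333:gateway/bgw-EXAMPLE",
      GatewayDisplayName: "my-renamed-gateway", // assumed field names
    })
  )
  .then((data) => console.log(data))
  .catch((error) => console.error(error.name, error.$metadata?.requestId));
```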

+ */ + public updateGatewayInformation( + args: UpdateGatewayInformationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateGatewayInformation( + args: UpdateGatewayInformationCommandInput, + cb: (err: any, data?: UpdateGatewayInformationCommandOutput) => void + ): void; + public updateGatewayInformation( + args: UpdateGatewayInformationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateGatewayInformationCommandOutput) => void + ): void; + public updateGatewayInformation( + args: UpdateGatewayInformationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateGatewayInformationCommandOutput) => void), + cb?: (err: any, data?: UpdateGatewayInformationCommandOutput) => void + ): Promise | void { + const command = new UpdateGatewayInformationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Updates a hypervisor's metadata, including its host, username, and password. Specify which + * hypervisor to update using the Amazon Resource Name (ARN) of the hypervisor in your + * request.
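A sketch using the aggregated `BackupGateway` client's callback overload (the `optionsOrCb` handling implemented above); `HypervisorArn`, `Host`, `Username`, and `Password` are assumed field names.

```ts
import { BackupGateway } from "@aws-sdk/client-backup-gateway";

const backupGateway = new BackupGateway({ region: "us-east-1" });

// Assumed input fields; rotates the hypervisor credentials the gateway uses.
backupGateway.updateHypervisor(
  {
    HypervisorArn: "arn:aws:backup-gateway:us-east-1:111122223333:hypervisor/hype-EXAMPLE",
    Host: "vcenter.example.com",
    Username: "backup-svc",
    Password: "NEW-EXAMPLE-PASSWORD",
  },
  (err, data) => {
    if (err) {
      console.error(err);
      return;
    }
    console.log(data); // expected to echo the hypervisor ARN on success
  }
);
```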

+ */ + public updateHypervisor( + args: UpdateHypervisorCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateHypervisor( + args: UpdateHypervisorCommandInput, + cb: (err: any, data?: UpdateHypervisorCommandOutput) => void + ): void; + public updateHypervisor( + args: UpdateHypervisorCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateHypervisorCommandOutput) => void + ): void; + public updateHypervisor( + args: UpdateHypervisorCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateHypervisorCommandOutput) => void), + cb?: (err: any, data?: UpdateHypervisorCommandOutput) => void + ): Promise | void { + const command = new UpdateHypervisorCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } +} diff --git a/clients/client-backup-gateway/src/BackupGatewayClient.ts b/clients/client-backup-gateway/src/BackupGatewayClient.ts new file mode 100644 index 000000000000..fc7826a2430c --- /dev/null +++ b/clients/client-backup-gateway/src/BackupGatewayClient.ts @@ -0,0 +1,326 @@ +import { + EndpointsInputConfig, + EndpointsResolvedConfig, + RegionInputConfig, + RegionResolvedConfig, + resolveEndpointsConfig, + resolveRegionConfig, +} from "@aws-sdk/config-resolver"; +import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length"; +import { + getHostHeaderPlugin, + HostHeaderInputConfig, + HostHeaderResolvedConfig, + resolveHostHeaderConfig, +} from "@aws-sdk/middleware-host-header"; +import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; +import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry"; +import { + AwsAuthInputConfig, + AwsAuthResolvedConfig, + getAwsAuthPlugin, + resolveAwsAuthConfig, +} from "@aws-sdk/middleware-signing"; +import { + getUserAgentPlugin, + resolveUserAgentConfig, + UserAgentInputConfig, + UserAgentResolvedConfig, +} from "@aws-sdk/middleware-user-agent"; +import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http"; +import { + Client as __Client, + SmithyConfiguration as __SmithyConfiguration, + SmithyResolvedConfiguration as __SmithyResolvedConfiguration, +} from "@aws-sdk/smithy-client"; +import { + Credentials as __Credentials, + Decoder as __Decoder, + Encoder as __Encoder, + Hash as __Hash, + HashConstructor as __HashConstructor, + HttpHandlerOptions as __HttpHandlerOptions, + Logger as __Logger, + Provider as __Provider, + Provider, + RegionInfoProvider, + StreamCollector as __StreamCollector, + UrlParser as __UrlParser, + UserAgent as __UserAgent, +} from "@aws-sdk/types"; + +import { + AssociateGatewayToServerCommandInput, + AssociateGatewayToServerCommandOutput, +} from "./commands/AssociateGatewayToServerCommand"; +import { CreateGatewayCommandInput, CreateGatewayCommandOutput } from "./commands/CreateGatewayCommand"; +import { DeleteGatewayCommandInput, DeleteGatewayCommandOutput } from "./commands/DeleteGatewayCommand"; +import { DeleteHypervisorCommandInput, DeleteHypervisorCommandOutput } from "./commands/DeleteHypervisorCommand"; +import { + DisassociateGatewayFromServerCommandInput, + DisassociateGatewayFromServerCommandOutput, +} from "./commands/DisassociateGatewayFromServerCommand"; +import { + 
ImportHypervisorConfigurationCommandInput, + ImportHypervisorConfigurationCommandOutput, +} from "./commands/ImportHypervisorConfigurationCommand"; +import { ListGatewaysCommandInput, ListGatewaysCommandOutput } from "./commands/ListGatewaysCommand"; +import { ListHypervisorsCommandInput, ListHypervisorsCommandOutput } from "./commands/ListHypervisorsCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + ListVirtualMachinesCommandInput, + ListVirtualMachinesCommandOutput, +} from "./commands/ListVirtualMachinesCommand"; +import { + PutMaintenanceStartTimeCommandInput, + PutMaintenanceStartTimeCommandOutput, +} from "./commands/PutMaintenanceStartTimeCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + TestHypervisorConfigurationCommandInput, + TestHypervisorConfigurationCommandOutput, +} from "./commands/TestHypervisorConfigurationCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { + UpdateGatewayInformationCommandInput, + UpdateGatewayInformationCommandOutput, +} from "./commands/UpdateGatewayInformationCommand"; +import { UpdateHypervisorCommandInput, UpdateHypervisorCommandOutput } from "./commands/UpdateHypervisorCommand"; +import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; + +export type ServiceInputTypes = + | AssociateGatewayToServerCommandInput + | CreateGatewayCommandInput + | DeleteGatewayCommandInput + | DeleteHypervisorCommandInput + | DisassociateGatewayFromServerCommandInput + | ImportHypervisorConfigurationCommandInput + | ListGatewaysCommandInput + | ListHypervisorsCommandInput + | ListTagsForResourceCommandInput + | ListVirtualMachinesCommandInput + | PutMaintenanceStartTimeCommandInput + | TagResourceCommandInput + | TestHypervisorConfigurationCommandInput + | UntagResourceCommandInput + | UpdateGatewayInformationCommandInput + | UpdateHypervisorCommandInput; + +export type ServiceOutputTypes = + | AssociateGatewayToServerCommandOutput + | CreateGatewayCommandOutput + | DeleteGatewayCommandOutput + | DeleteHypervisorCommandOutput + | DisassociateGatewayFromServerCommandOutput + | ImportHypervisorConfigurationCommandOutput + | ListGatewaysCommandOutput + | ListHypervisorsCommandOutput + | ListTagsForResourceCommandOutput + | ListVirtualMachinesCommandOutput + | PutMaintenanceStartTimeCommandOutput + | TagResourceCommandOutput + | TestHypervisorConfigurationCommandOutput + | UntagResourceCommandOutput + | UpdateGatewayInformationCommandOutput + | UpdateHypervisorCommandOutput; + +export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { + /** + * The HTTP handler to use. Fetch in browser and Https in Nodejs. + */ + requestHandler?: __HttpHandler; + + /** + * A constructor for a class implementing the {@link __Hash} interface + * that computes the SHA-256 HMAC or checksum of a string or binary buffer. + * @internal + */ + sha256?: __HashConstructor; + + /** + * The function that will be used to convert strings into HTTP endpoints. + * @internal + */ + urlParser?: __UrlParser; + + /** + * A function that can calculate the length of a request body. + * @internal + */ + bodyLengthChecker?: (body: any) => number | undefined; + + /** + * A function that converts a stream into an array of bytes. 
+ * @internal + */ + streamCollector?: __StreamCollector; + + /** + * The function that will be used to convert a base64-encoded string to a byte array. + * @internal + */ + base64Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a base64-encoded string. + * @internal + */ + base64Encoder?: __Encoder; + + /** + * The function that will be used to convert a UTF8-encoded string to a byte array. + * @internal + */ + utf8Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a UTF-8 encoded string. + * @internal + */ + utf8Encoder?: __Encoder; + + /** + * The runtime environment. + * @internal + */ + runtime?: string; + + /** + * Disable dyanamically changing the endpoint of the client based on the hostPrefix + * trait of an operation. + */ + disableHostPrefix?: boolean; + + /** + * Value for how many times a request will be made at most in case of retry. + */ + maxAttempts?: number | __Provider; + + /** + * Specifies which retry algorithm to use. + */ + retryMode?: string | __Provider; + + /** + * Optional logger for logging debug/info/warn/error. + */ + logger?: __Logger; + + /** + * Enables IPv6/IPv4 dualstack endpoint. + */ + useDualstackEndpoint?: boolean | __Provider; + + /** + * Enables FIPS compatible endpoints. + */ + useFipsEndpoint?: boolean | __Provider; + + /** + * Unique service identifier. + * @internal + */ + serviceId?: string; + + /** + * The AWS region to which this client will send requests + */ + region?: string | __Provider; + + /** + * Default credentials provider; Not available in browser runtime. + * @internal + */ + credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; + + /** + * Fetch related hostname, signing name or signing region with given region. + * @internal + */ + regionInfoProvider?: RegionInfoProvider; + + /** + * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header + * @internal + */ + defaultUserAgentProvider?: Provider<__UserAgent>; +} + +type BackupGatewayClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & + ClientDefaults & + RegionInputConfig & + EndpointsInputConfig & + RetryInputConfig & + HostHeaderInputConfig & + AwsAuthInputConfig & + UserAgentInputConfig; +/** + * The configuration interface of BackupGatewayClient class constructor that set the region, credentials and other options. + */ +export interface BackupGatewayClientConfig extends BackupGatewayClientConfigType {} + +type BackupGatewayClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & + Required & + RegionResolvedConfig & + EndpointsResolvedConfig & + RetryResolvedConfig & + HostHeaderResolvedConfig & + AwsAuthResolvedConfig & + UserAgentResolvedConfig; +/** + * The resolved configuration interface of BackupGatewayClient class. This is resolved and normalized from the {@link BackupGatewayClientConfig | constructor configuration interface}. + */ +export interface BackupGatewayClientResolvedConfig extends BackupGatewayClientResolvedConfigType {} + +/** + * Backup gateway + *

<p>Backup gateway connects Backup to your hypervisor, so you can + * create, store, and restore backups of your virtual machines (VMs) anywhere, whether + * on-premises or in the VMware Cloud (VMC) on Amazon Web Services.</p>
+ * <p>Add on-premises resources by connecting to a hypervisor through a gateway. Backup will automatically discover the resources in your hypervisor.</p>
+ * <p>Use Backup to assign virtual or on-premises resources to a backup plan, or run + * on-demand backups. Once you have backed up your resources, you can view them and restore them + * like any resource supported by Backup.</p>
+ * <p>To download the Amazon Web Services software to get started, navigate to the Backup console, choose Gateways, then choose Create gateway.</p>
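+ * <p>A minimal construction sketch, assuming only the documented region option; all other
+ * settings listed in the configuration interface above are optional and fall back to the
+ * resolved runtime defaults (credentials come from the default provider chain in Node.js).</p>
+ * ```javascript
+ * import { BackupGatewayClient, ListGatewaysCommand } from "@aws-sdk/client-backup-gateway";
+ *
+ * const client = new BackupGatewayClient({ region: "us-east-1" });
+ *
+ * // Send any generated command through the same client instance.
+ * const output = await client.send(new ListGatewaysCommand({}));
+ * console.log(output);
+ * ```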

+ */ +export class BackupGatewayClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + BackupGatewayClientResolvedConfig +> { + /** + * The resolved configuration of BackupGatewayClient class. This is resolved and normalized from the {@link BackupGatewayClientConfig | constructor configuration interface}. + */ + readonly config: BackupGatewayClientResolvedConfig; + + constructor(configuration: BackupGatewayClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. + */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-backup-gateway/src/commands/AssociateGatewayToServerCommand.ts b/clients/client-backup-gateway/src/commands/AssociateGatewayToServerCommand.ts new file mode 100644 index 000000000000..4f6edb34f33d --- /dev/null +++ b/clients/client-backup-gateway/src/commands/AssociateGatewayToServerCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { AssociateGatewayToServerInput, AssociateGatewayToServerOutput } from "../models/models_0"; +import { + deserializeAws_json1_0AssociateGatewayToServerCommand, + serializeAws_json1_0AssociateGatewayToServerCommand, +} from "../protocols/Aws_json1_0"; + +export interface AssociateGatewayToServerCommandInput extends AssociateGatewayToServerInput {} +export interface AssociateGatewayToServerCommandOutput extends AssociateGatewayToServerOutput, __MetadataBearer {} + +/** + *

<p>Associates a backup gateway with your server. After you complete the association process, + * you can back up and restore your VMs through the gateway.</p>
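+ * <p>A short sketch of the association call; the GatewayArn and ServerArn field names are
+ * assumptions based on the description above; verify them against the
+ * AssociateGatewayToServerInput shape in models_0.</p>
+ * ```javascript
+ * import { BackupGatewayClient, AssociateGatewayToServerCommand } from "@aws-sdk/client-backup-gateway";
+ *
+ * const client = new BackupGatewayClient({ region: "us-east-1" });
+ * const response = await client.send(
+ *   new AssociateGatewayToServerCommand({
+ *     GatewayArn: "arn:aws:backup-gateway:us-east-1:123456789012:gateway/bgw-1234", // illustrative ARNs
+ *     ServerArn: "arn:aws:backup-gateway:us-east-1:123456789012:hypervisor/hype-1234",
+ *   })
+ * );
+ * ```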

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, AssociateGatewayToServerCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, AssociateGatewayToServerCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new AssociateGatewayToServerCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AssociateGatewayToServerCommandInput} for command's `input` shape. + * @see {@link AssociateGatewayToServerCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class AssociateGatewayToServerCommand extends $Command< + AssociateGatewayToServerCommandInput, + AssociateGatewayToServerCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AssociateGatewayToServerCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "AssociateGatewayToServerCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AssociateGatewayToServerInput.filterSensitiveLog, + outputFilterSensitiveLog: AssociateGatewayToServerOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: AssociateGatewayToServerCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0AssociateGatewayToServerCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0AssociateGatewayToServerCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/CreateGatewayCommand.ts b/clients/client-backup-gateway/src/commands/CreateGatewayCommand.ts new file mode 100644 index 000000000000..5f24472eb348 --- /dev/null +++ b/clients/client-backup-gateway/src/commands/CreateGatewayCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { 
CreateGatewayInput, CreateGatewayOutput } from "../models/models_0"; +import { + deserializeAws_json1_0CreateGatewayCommand, + serializeAws_json1_0CreateGatewayCommand, +} from "../protocols/Aws_json1_0"; + +export interface CreateGatewayCommandInput extends CreateGatewayInput {} +export interface CreateGatewayCommandOutput extends CreateGatewayOutput, __MetadataBearer {} + +/** + *

<p>Creates a backup gateway. After you create a gateway, you can associate it with a server + * using the AssociateGatewayToServer operation.</p>

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, CreateGatewayCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, CreateGatewayCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new CreateGatewayCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateGatewayCommandInput} for command's `input` shape. + * @see {@link CreateGatewayCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class CreateGatewayCommand extends $Command< + CreateGatewayCommandInput, + CreateGatewayCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateGatewayCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "CreateGatewayCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateGatewayInput.filterSensitiveLog, + outputFilterSensitiveLog: CreateGatewayOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateGatewayCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0CreateGatewayCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0CreateGatewayCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/DeleteGatewayCommand.ts b/clients/client-backup-gateway/src/commands/DeleteGatewayCommand.ts new file mode 100644 index 000000000000..a5dee5245cd3 --- /dev/null +++ b/clients/client-backup-gateway/src/commands/DeleteGatewayCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { DeleteGatewayInput, DeleteGatewayOutput } from "../models/models_0"; +import { + deserializeAws_json1_0DeleteGatewayCommand, + 
serializeAws_json1_0DeleteGatewayCommand, +} from "../protocols/Aws_json1_0"; + +export interface DeleteGatewayCommandInput extends DeleteGatewayInput {} +export interface DeleteGatewayCommandOutput extends DeleteGatewayOutput, __MetadataBearer {} + +/** + *

<p>Deletes a backup gateway.</p>

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, DeleteGatewayCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, DeleteGatewayCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new DeleteGatewayCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteGatewayCommandInput} for command's `input` shape. + * @see {@link DeleteGatewayCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class DeleteGatewayCommand extends $Command< + DeleteGatewayCommandInput, + DeleteGatewayCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteGatewayCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "DeleteGatewayCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteGatewayInput.filterSensitiveLog, + outputFilterSensitiveLog: DeleteGatewayOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteGatewayCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0DeleteGatewayCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0DeleteGatewayCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/DeleteHypervisorCommand.ts b/clients/client-backup-gateway/src/commands/DeleteHypervisorCommand.ts new file mode 100644 index 000000000000..94b20553215e --- /dev/null +++ b/clients/client-backup-gateway/src/commands/DeleteHypervisorCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { DeleteHypervisorInput, DeleteHypervisorOutput } from "../models/models_0"; +import { + deserializeAws_json1_0DeleteHypervisorCommand, + 
serializeAws_json1_0DeleteHypervisorCommand, +} from "../protocols/Aws_json1_0"; + +export interface DeleteHypervisorCommandInput extends DeleteHypervisorInput {} +export interface DeleteHypervisorCommandOutput extends DeleteHypervisorOutput, __MetadataBearer {} + +/** + *

<p>Deletes a hypervisor.</p>

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, DeleteHypervisorCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, DeleteHypervisorCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new DeleteHypervisorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteHypervisorCommandInput} for command's `input` shape. + * @see {@link DeleteHypervisorCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class DeleteHypervisorCommand extends $Command< + DeleteHypervisorCommandInput, + DeleteHypervisorCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteHypervisorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "DeleteHypervisorCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteHypervisorInput.filterSensitiveLog, + outputFilterSensitiveLog: DeleteHypervisorOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteHypervisorCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0DeleteHypervisorCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0DeleteHypervisorCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/DisassociateGatewayFromServerCommand.ts b/clients/client-backup-gateway/src/commands/DisassociateGatewayFromServerCommand.ts new file mode 100644 index 000000000000..b3200b67aff0 --- /dev/null +++ b/clients/client-backup-gateway/src/commands/DisassociateGatewayFromServerCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { DisassociateGatewayFromServerInput, 
DisassociateGatewayFromServerOutput } from "../models/models_0"; +import { + deserializeAws_json1_0DisassociateGatewayFromServerCommand, + serializeAws_json1_0DisassociateGatewayFromServerCommand, +} from "../protocols/Aws_json1_0"; + +export interface DisassociateGatewayFromServerCommandInput extends DisassociateGatewayFromServerInput {} +export interface DisassociateGatewayFromServerCommandOutput + extends DisassociateGatewayFromServerOutput, + __MetadataBearer {} + +/** + *

<p>Disassociates a backup gateway from the specified server. After the disassociation process + * finishes, the gateway can no longer access the virtual machines on the server.</p>

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, DisassociateGatewayFromServerCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, DisassociateGatewayFromServerCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new DisassociateGatewayFromServerCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisassociateGatewayFromServerCommandInput} for command's `input` shape. + * @see {@link DisassociateGatewayFromServerCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class DisassociateGatewayFromServerCommand extends $Command< + DisassociateGatewayFromServerCommandInput, + DisassociateGatewayFromServerCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisassociateGatewayFromServerCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "DisassociateGatewayFromServerCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisassociateGatewayFromServerInput.filterSensitiveLog, + outputFilterSensitiveLog: DisassociateGatewayFromServerOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisassociateGatewayFromServerCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0DisassociateGatewayFromServerCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_0DisassociateGatewayFromServerCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/ImportHypervisorConfigurationCommand.ts b/clients/client-backup-gateway/src/commands/ImportHypervisorConfigurationCommand.ts new file mode 100644 index 000000000000..11abaec5d455 --- /dev/null +++ b/clients/client-backup-gateway/src/commands/ImportHypervisorConfigurationCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from 
"@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { ImportHypervisorConfigurationInput, ImportHypervisorConfigurationOutput } from "../models/models_0"; +import { + deserializeAws_json1_0ImportHypervisorConfigurationCommand, + serializeAws_json1_0ImportHypervisorConfigurationCommand, +} from "../protocols/Aws_json1_0"; + +export interface ImportHypervisorConfigurationCommandInput extends ImportHypervisorConfigurationInput {} +export interface ImportHypervisorConfigurationCommandOutput + extends ImportHypervisorConfigurationOutput, + __MetadataBearer {} + +/** + *

<p>Connects to a hypervisor by importing its configuration.</p>
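+ * <p>A hedged sketch of importing a hypervisor configuration; the Name, Host, Username, and
+ * Password field names are assumptions about the input shape; confirm them against
+ * ImportHypervisorConfigurationInput before use.</p>
+ * ```javascript
+ * import { BackupGatewayClient, ImportHypervisorConfigurationCommand } from "@aws-sdk/client-backup-gateway";
+ *
+ * const client = new BackupGatewayClient({ region: "us-east-1" });
+ * const output = await client.send(
+ *   new ImportHypervisorConfigurationCommand({
+ *     Name: "vcenter-primary",          // assumed field: a display name for the hypervisor
+ *     Host: "vcenter.example.com",      // assumed field: hostname or IP of the hypervisor
+ *     Username: "admin",                // assumed field
+ *     Password: "example-password",     // assumed field
+ *   })
+ * );
+ * console.log(output);
+ * ```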

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, ImportHypervisorConfigurationCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, ImportHypervisorConfigurationCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new ImportHypervisorConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ImportHypervisorConfigurationCommandInput} for command's `input` shape. + * @see {@link ImportHypervisorConfigurationCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class ImportHypervisorConfigurationCommand extends $Command< + ImportHypervisorConfigurationCommandInput, + ImportHypervisorConfigurationCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ImportHypervisorConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "ImportHypervisorConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ImportHypervisorConfigurationInput.filterSensitiveLog, + outputFilterSensitiveLog: ImportHypervisorConfigurationOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ImportHypervisorConfigurationCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0ImportHypervisorConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_0ImportHypervisorConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/ListGatewaysCommand.ts b/clients/client-backup-gateway/src/commands/ListGatewaysCommand.ts new file mode 100644 index 000000000000..f7e3c537a06e --- /dev/null +++ b/clients/client-backup-gateway/src/commands/ListGatewaysCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, 
ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { ListGatewaysInput, ListGatewaysOutput } from "../models/models_0"; +import { + deserializeAws_json1_0ListGatewaysCommand, + serializeAws_json1_0ListGatewaysCommand, +} from "../protocols/Aws_json1_0"; + +export interface ListGatewaysCommandInput extends ListGatewaysInput {} +export interface ListGatewaysCommandOutput extends ListGatewaysOutput, __MetadataBearer {} + +/** + *

<p>Lists backup gateways owned by an Amazon Web Services account in an Amazon Web Services Region. The returned list is ordered by gateway Amazon Resource Name (ARN).</p>
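+ * <p>Because results are returned a page at a time, the paginator generated alongside this
+ * command can be used instead of tracking the continuation token by hand. A minimal sketch,
+ * assuming the conventional paginateListGateways export from this package's pagination module:</p>
+ * ```javascript
+ * import { BackupGatewayClient, paginateListGateways } from "@aws-sdk/client-backup-gateway";
+ *
+ * const client = new BackupGatewayClient({ region: "us-east-1" });
+ * for await (const page of paginateListGateways({ client }, {})) {
+ *   // Each page is a ListGatewaysOutput; log each page of results.
+ *   console.log(page);
+ * }
+ * ```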

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, ListGatewaysCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, ListGatewaysCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new ListGatewaysCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListGatewaysCommandInput} for command's `input` shape. + * @see {@link ListGatewaysCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class ListGatewaysCommand extends $Command< + ListGatewaysCommandInput, + ListGatewaysCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListGatewaysCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "ListGatewaysCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListGatewaysInput.filterSensitiveLog, + outputFilterSensitiveLog: ListGatewaysOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListGatewaysCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0ListGatewaysCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0ListGatewaysCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/ListHypervisorsCommand.ts b/clients/client-backup-gateway/src/commands/ListHypervisorsCommand.ts new file mode 100644 index 000000000000..c6d46b83eeb3 --- /dev/null +++ b/clients/client-backup-gateway/src/commands/ListHypervisorsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { ListHypervisorsInput, ListHypervisorsOutput } from "../models/models_0"; +import { + deserializeAws_json1_0ListHypervisorsCommand, + 
serializeAws_json1_0ListHypervisorsCommand, +} from "../protocols/Aws_json1_0"; + +export interface ListHypervisorsCommandInput extends ListHypervisorsInput {} +export interface ListHypervisorsCommandOutput extends ListHypervisorsOutput, __MetadataBearer {} + +/** + *

<p>Lists your hypervisors.</p>

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, ListHypervisorsCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, ListHypervisorsCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new ListHypervisorsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListHypervisorsCommandInput} for command's `input` shape. + * @see {@link ListHypervisorsCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class ListHypervisorsCommand extends $Command< + ListHypervisorsCommandInput, + ListHypervisorsCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListHypervisorsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "ListHypervisorsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListHypervisorsInput.filterSensitiveLog, + outputFilterSensitiveLog: ListHypervisorsOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListHypervisorsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0ListHypervisorsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0ListHypervisorsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/ListTagsForResourceCommand.ts b/clients/client-backup-gateway/src/commands/ListTagsForResourceCommand.ts new file mode 100644 index 000000000000..58ccd92bba8b --- /dev/null +++ b/clients/client-backup-gateway/src/commands/ListTagsForResourceCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { ListTagsForResourceInput, ListTagsForResourceOutput } from "../models/models_0"; +import { + 
deserializeAws_json1_0ListTagsForResourceCommand, + serializeAws_json1_0ListTagsForResourceCommand, +} from "../protocols/Aws_json1_0"; + +export interface ListTagsForResourceCommandInput extends ListTagsForResourceInput {} +export interface ListTagsForResourceCommandOutput extends ListTagsForResourceOutput, __MetadataBearer {} + +/** + *

<p>Lists the tags applied to the resource identified by its Amazon Resource Name + * (ARN).</p>

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, ListTagsForResourceCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, ListTagsForResourceCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "ListTagsForResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTagsForResourceInput.filterSensitiveLog, + outputFilterSensitiveLog: ListTagsForResourceOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTagsForResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0ListTagsForResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0ListTagsForResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/ListVirtualMachinesCommand.ts b/clients/client-backup-gateway/src/commands/ListVirtualMachinesCommand.ts new file mode 100644 index 000000000000..9df8da24e3c7 --- /dev/null +++ b/clients/client-backup-gateway/src/commands/ListVirtualMachinesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { ListVirtualMachinesInput, ListVirtualMachinesOutput } 
from "../models/models_0"; +import { + deserializeAws_json1_0ListVirtualMachinesCommand, + serializeAws_json1_0ListVirtualMachinesCommand, +} from "../protocols/Aws_json1_0"; + +export interface ListVirtualMachinesCommandInput extends ListVirtualMachinesInput {} +export interface ListVirtualMachinesCommandOutput extends ListVirtualMachinesOutput, __MetadataBearer {} + +/** + *

<p>Lists your virtual machines.</p>

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, ListVirtualMachinesCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, ListVirtualMachinesCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new ListVirtualMachinesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListVirtualMachinesCommandInput} for command's `input` shape. + * @see {@link ListVirtualMachinesCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class ListVirtualMachinesCommand extends $Command< + ListVirtualMachinesCommandInput, + ListVirtualMachinesCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListVirtualMachinesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "ListVirtualMachinesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListVirtualMachinesInput.filterSensitiveLog, + outputFilterSensitiveLog: ListVirtualMachinesOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListVirtualMachinesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0ListVirtualMachinesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0ListVirtualMachinesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/PutMaintenanceStartTimeCommand.ts b/clients/client-backup-gateway/src/commands/PutMaintenanceStartTimeCommand.ts new file mode 100644 index 000000000000..a73bbe35229f --- /dev/null +++ b/clients/client-backup-gateway/src/commands/PutMaintenanceStartTimeCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { PutMaintenanceStartTimeInput, 
PutMaintenanceStartTimeOutput } from "../models/models_0"; +import { + deserializeAws_json1_0PutMaintenanceStartTimeCommand, + serializeAws_json1_0PutMaintenanceStartTimeCommand, +} from "../protocols/Aws_json1_0"; + +export interface PutMaintenanceStartTimeCommandInput extends PutMaintenanceStartTimeInput {} +export interface PutMaintenanceStartTimeCommandOutput extends PutMaintenanceStartTimeOutput, __MetadataBearer {} + +/** + *

<p>Sets the maintenance start time for a gateway.</p>

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, PutMaintenanceStartTimeCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, PutMaintenanceStartTimeCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new PutMaintenanceStartTimeCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link PutMaintenanceStartTimeCommandInput} for command's `input` shape. + * @see {@link PutMaintenanceStartTimeCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class PutMaintenanceStartTimeCommand extends $Command< + PutMaintenanceStartTimeCommandInput, + PutMaintenanceStartTimeCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: PutMaintenanceStartTimeCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "PutMaintenanceStartTimeCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: PutMaintenanceStartTimeInput.filterSensitiveLog, + outputFilterSensitiveLog: PutMaintenanceStartTimeOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: PutMaintenanceStartTimeCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0PutMaintenanceStartTimeCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0PutMaintenanceStartTimeCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/TagResourceCommand.ts b/clients/client-backup-gateway/src/commands/TagResourceCommand.ts new file mode 100644 index 000000000000..571f0f73a0fb --- /dev/null +++ b/clients/client-backup-gateway/src/commands/TagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { TagResourceInput, 
TagResourceOutput } from "../models/models_0"; +import { + deserializeAws_json1_0TagResourceCommand, + serializeAws_json1_0TagResourceCommand, +} from "../protocols/Aws_json1_0"; + +export interface TagResourceCommandInput extends TagResourceInput {} +export interface TagResourceCommandOutput extends TagResourceOutput, __MetadataBearer {} + +/** + *

<p>Tags the resource.</p>
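+ * <p>A minimal tagging sketch; the ResourceARN and Tags field names and the Key/Value tag
+ * structure are assumptions; verify them against the TagResourceInput shape.</p>
+ * ```javascript
+ * import { BackupGatewayClient, TagResourceCommand } from "@aws-sdk/client-backup-gateway";
+ *
+ * const client = new BackupGatewayClient({ region: "us-east-1" });
+ * await client.send(
+ *   new TagResourceCommand({
+ *     ResourceARN: "arn:aws:backup-gateway:us-east-1:123456789012:gateway/bgw-1234", // illustrative ARN
+ *     Tags: [{ Key: "environment", Value: "production" }],
+ *   })
+ * );
+ * ```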

+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, TagResourceCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, TagResourceCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "TagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TagResourceInput.filterSensitiveLog, + outputFilterSensitiveLog: TagResourceOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: TagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0TagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0TagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/TestHypervisorConfigurationCommand.ts b/clients/client-backup-gateway/src/commands/TestHypervisorConfigurationCommand.ts new file mode 100644 index 000000000000..f1ebd3602a6d --- /dev/null +++ b/clients/client-backup-gateway/src/commands/TestHypervisorConfigurationCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { TestHypervisorConfigurationInput, TestHypervisorConfigurationOutput } from "../models/models_0"; +import { + 
deserializeAws_json1_0TestHypervisorConfigurationCommand, + serializeAws_json1_0TestHypervisorConfigurationCommand, +} from "../protocols/Aws_json1_0"; + +export interface TestHypervisorConfigurationCommandInput extends TestHypervisorConfigurationInput {} +export interface TestHypervisorConfigurationCommandOutput extends TestHypervisorConfigurationOutput, __MetadataBearer {} + +/** + *
<p>Tests your hypervisor configuration to validate that backup gateway can connect with the + * hypervisor and its resources.</p>
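+ * <p>A hypothetical input sketch (fields from <code>TestHypervisorConfigurationInput</code>; all values are placeholders). <code>Username</code> and <code>Password</code> are masked as sensitive in this client's logs:</p>
+ * ```javascript
+ * const input = {
+ *   GatewayArn: "arn:aws:backup-gateway:us-east-1:111122223333:gateway/GW-EXAMPLE",
+ *   Host: "vcenter.example.com", // IP address or FQDN
+ *   Username: "vsphere-admin",
+ *   Password: "placeholder-password",
+ * };
+ * ```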
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, TestHypervisorConfigurationCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, TestHypervisorConfigurationCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new TestHypervisorConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TestHypervisorConfigurationCommandInput} for command's `input` shape. + * @see {@link TestHypervisorConfigurationCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class TestHypervisorConfigurationCommand extends $Command< + TestHypervisorConfigurationCommandInput, + TestHypervisorConfigurationCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TestHypervisorConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "TestHypervisorConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TestHypervisorConfigurationInput.filterSensitiveLog, + outputFilterSensitiveLog: TestHypervisorConfigurationOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: TestHypervisorConfigurationCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0TestHypervisorConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_0TestHypervisorConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/UntagResourceCommand.ts b/clients/client-backup-gateway/src/commands/UntagResourceCommand.ts new file mode 100644 index 000000000000..2bda8eb826ce --- /dev/null +++ b/clients/client-backup-gateway/src/commands/UntagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, 
ServiceOutputTypes } from "../BackupGatewayClient"; +import { UntagResourceInput, UntagResourceOutput } from "../models/models_0"; +import { + deserializeAws_json1_0UntagResourceCommand, + serializeAws_json1_0UntagResourceCommand, +} from "../protocols/Aws_json1_0"; + +export interface UntagResourceCommandInput extends UntagResourceInput {} +export interface UntagResourceCommandOutput extends UntagResourceOutput, __MetadataBearer {} + +/** + *
<p>Removes tags from the resource.</p>
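+ * <p>A hypothetical input sketch (fields from <code>UntagResourceInput</code>; the ARN and key are placeholders):</p>
+ * ```javascript
+ * const input = {
+ *   ResourceARN: "arn:aws:backup-gateway:us-east-1:111122223333:gateway/GW-EXAMPLE",
+ *   TagKeys: ["project"],
+ * };
+ * ```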
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, UntagResourceCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, UntagResourceCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "UntagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UntagResourceInput.filterSensitiveLog, + outputFilterSensitiveLog: UntagResourceOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UntagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0UntagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0UntagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/UpdateGatewayInformationCommand.ts b/clients/client-backup-gateway/src/commands/UpdateGatewayInformationCommand.ts new file mode 100644 index 000000000000..ed58e00858d2 --- /dev/null +++ b/clients/client-backup-gateway/src/commands/UpdateGatewayInformationCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; +import { UpdateGatewayInformationInput, UpdateGatewayInformationOutput } from "../models/models_0"; +import { + 
deserializeAws_json1_0UpdateGatewayInformationCommand, + serializeAws_json1_0UpdateGatewayInformationCommand, +} from "../protocols/Aws_json1_0"; + +export interface UpdateGatewayInformationCommandInput extends UpdateGatewayInformationInput {} +export interface UpdateGatewayInformationCommandOutput extends UpdateGatewayInformationOutput, __MetadataBearer {} + +/** + *
<p>Updates a gateway's name. Specify which gateway to update using the Amazon Resource Name + * (ARN) of the gateway in your request.</p>
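+ * <p>A hypothetical input sketch (fields from <code>UpdateGatewayInformationInput</code>; values are placeholders):</p>
+ * ```javascript
+ * const input = {
+ *   GatewayArn: "arn:aws:backup-gateway:us-east-1:111122223333:gateway/GW-EXAMPLE",
+ *   GatewayDisplayName: "renamed-gateway",
+ * };
+ * ```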
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, UpdateGatewayInformationCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, UpdateGatewayInformationCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new UpdateGatewayInformationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateGatewayInformationCommandInput} for command's `input` shape. + * @see {@link UpdateGatewayInformationCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class UpdateGatewayInformationCommand extends $Command< + UpdateGatewayInformationCommandInput, + UpdateGatewayInformationCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateGatewayInformationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "UpdateGatewayInformationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateGatewayInformationInput.filterSensitiveLog, + outputFilterSensitiveLog: UpdateGatewayInformationOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateGatewayInformationCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0UpdateGatewayInformationCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0UpdateGatewayInformationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/UpdateHypervisorCommand.ts b/clients/client-backup-gateway/src/commands/UpdateHypervisorCommand.ts new file mode 100644 index 000000000000..62307f46ff8a --- /dev/null +++ b/clients/client-backup-gateway/src/commands/UpdateHypervisorCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { BackupGatewayClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../BackupGatewayClient"; 
+import { UpdateHypervisorInput, UpdateHypervisorOutput } from "../models/models_0"; +import { + deserializeAws_json1_0UpdateHypervisorCommand, + serializeAws_json1_0UpdateHypervisorCommand, +} from "../protocols/Aws_json1_0"; + +export interface UpdateHypervisorCommandInput extends UpdateHypervisorInput {} +export interface UpdateHypervisorCommandOutput extends UpdateHypervisorOutput, __MetadataBearer {} + +/** + *
<p>Updates a hypervisor's metadata, including its host, username, and password. Specify which + * hypervisor to update using the Amazon Resource Name (ARN) of the hypervisor in your + * request.</p>
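+ * <p>A hypothetical input sketch (fields from <code>UpdateHypervisorInput</code>; values are placeholders, and the credentials are masked as sensitive in this client's logs):</p>
+ * ```javascript
+ * const input = {
+ *   HypervisorArn: "arn:aws:backup-gateway:us-east-1:111122223333:hypervisor/HYP-EXAMPLE",
+ *   Host: "vcenter.example.com",
+ *   Username: "vsphere-admin",
+ *   Password: "placeholder-password",
+ * };
+ * ```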
+ * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { BackupGatewayClient, UpdateHypervisorCommand } from "@aws-sdk/client-backup-gateway"; // ES Modules import + * // const { BackupGatewayClient, UpdateHypervisorCommand } = require("@aws-sdk/client-backup-gateway"); // CommonJS import + * const client = new BackupGatewayClient(config); + * const command = new UpdateHypervisorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateHypervisorCommandInput} for command's `input` shape. + * @see {@link UpdateHypervisorCommandOutput} for command's `response` shape. + * @see {@link BackupGatewayClientResolvedConfig | config} for BackupGatewayClient's `config` shape. + * + */ +export class UpdateHypervisorCommand extends $Command< + UpdateHypervisorCommandInput, + UpdateHypervisorCommandOutput, + BackupGatewayClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateHypervisorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: BackupGatewayClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "BackupGatewayClient"; + const commandName = "UpdateHypervisorCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateHypervisorInput.filterSensitiveLog, + outputFilterSensitiveLog: UpdateHypervisorOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateHypervisorCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0UpdateHypervisorCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_0UpdateHypervisorCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-backup-gateway/src/commands/index.ts b/clients/client-backup-gateway/src/commands/index.ts new file mode 100644 index 000000000000..ac711f70e1c5 --- /dev/null +++ b/clients/client-backup-gateway/src/commands/index.ts @@ -0,0 +1,16 @@ +export * from "./AssociateGatewayToServerCommand"; +export * from "./CreateGatewayCommand"; +export * from "./DeleteGatewayCommand"; +export * from "./DeleteHypervisorCommand"; +export * from "./DisassociateGatewayFromServerCommand"; +export * from "./ImportHypervisorConfigurationCommand"; +export * from "./ListGatewaysCommand"; +export * from "./ListHypervisorsCommand"; +export * from "./ListTagsForResourceCommand"; +export * from "./ListVirtualMachinesCommand"; +export * from "./PutMaintenanceStartTimeCommand"; +export * from "./TagResourceCommand"; +export * from "./TestHypervisorConfigurationCommand"; +export * from "./UntagResourceCommand"; +export * from "./UpdateGatewayInformationCommand"; +export * from "./UpdateHypervisorCommand"; diff 
--git a/clients/client-backup-gateway/src/endpoints.ts b/clients/client-backup-gateway/src/endpoints.ts new file mode 100644 index 000000000000..7f4326c6cd4e --- /dev/null +++ b/clients/client-backup-gateway/src/endpoints.ts @@ -0,0 +1,134 @@ +import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; +import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; + +const regionHash: RegionHash = {}; + +const partitionHash: PartitionHash = { + aws: { + regions: [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ], + regionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "backup-gateway.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "backup-gateway-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "backup-gateway-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "backup-gateway.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, + "aws-cn": { + regions: ["cn-north-1", "cn-northwest-1"], + regionRegex: "^cn\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "backup-gateway.{region}.amazonaws.com.cn", + tags: [], + }, + { + hostname: "backup-gateway-fips.{region}.amazonaws.com.cn", + tags: ["fips"], + }, + { + hostname: "backup-gateway-fips.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack", "fips"], + }, + { + hostname: "backup-gateway.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "aws-iso": { + regions: ["us-iso-east-1", "us-iso-west-1"], + regionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "backup-gateway.{region}.c2s.ic.gov", + tags: [], + }, + { + hostname: "backup-gateway-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, + ], + }, + "aws-iso-b": { + regions: ["us-isob-east-1"], + regionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "backup-gateway.{region}.sc2s.sgov.gov", + tags: [], + }, + { + hostname: "backup-gateway-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, + ], + }, + "aws-us-gov": { + regions: ["us-gov-east-1", "us-gov-west-1"], + regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "backup-gateway.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "backup-gateway-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "backup-gateway-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "backup-gateway.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, +}; + +export const defaultRegionInfoProvider: RegionInfoProvider = async ( + region: string, + options?: RegionInfoProviderOptions +) => + getRegionInfo(region, { + ...options, + signingService: "backup-gateway", + regionHash, + partitionHash, + }); diff --git a/clients/client-backup-gateway/src/index.ts b/clients/client-backup-gateway/src/index.ts new file mode 100644 index 000000000000..dbb9d3b2f8dd --- /dev/null +++ b/clients/client-backup-gateway/src/index.ts @@ -0,0 +1,5 @@ +export * from "./BackupGateway"; +export * from "./BackupGatewayClient"; +export * from "./commands"; +export * from "./models"; +export * from "./pagination"; diff --git a/clients/client-backup-gateway/src/models/index.ts 
b/clients/client-backup-gateway/src/models/index.ts new file mode 100644 index 000000000000..09c5d6e09b8c --- /dev/null +++ b/clients/client-backup-gateway/src/models/index.ts @@ -0,0 +1 @@ +export * from "./models_0"; diff --git a/clients/client-backup-gateway/src/models/models_0.ts b/clients/client-backup-gateway/src/models/models_0.ts new file mode 100644 index 000000000000..454091d91e49 --- /dev/null +++ b/clients/client-backup-gateway/src/models/models_0.ts @@ -0,0 +1,968 @@ +import { SENSITIVE_STRING } from "@aws-sdk/smithy-client"; +import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; + +/** + *
<p>The operation cannot proceed because you have insufficient permissions.</p>
+ */ +export interface AccessDeniedException extends __SmithyException, $MetadataBearer { + name: "AccessDeniedException"; + $fault: "client"; + /** + *
<p>A description of why you have insufficient permissions.</p>
+ */ + ErrorCode: string | undefined; + + Message?: string; +} + +export namespace AccessDeniedException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccessDeniedException): any => ({ + ...obj, + }); +} + +export interface AssociateGatewayToServerInput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + * to return a list of gateways for your account and Amazon Web Services Region.</p>
+ */ + GatewayArn: string | undefined; + + /** + *
<p>The Amazon Resource Name (ARN) of the server that hosts your virtual machines.</p>
+ */ + ServerArn: string | undefined; +} + +export namespace AssociateGatewayToServerInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateGatewayToServerInput): any => ({ + ...obj, + }); +} + +export interface AssociateGatewayToServerOutput { + /** + *
<p>The Amazon Resource Name (ARN) of a gateway.</p>
+ */ + GatewayArn?: string; +} + +export namespace AssociateGatewayToServerOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateGatewayToServerOutput): any => ({ + ...obj, + }); +} + +/** + *
<p>The operation cannot proceed because it is not supported.</p>
+ */ +export interface ConflictException extends __SmithyException, $MetadataBearer { + name: "ConflictException"; + $fault: "client"; + /** + *
<p>A description of why the operation is not supported.</p>
+ */ + ErrorCode: string | undefined; + + Message?: string; +} + +export namespace ConflictException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConflictException): any => ({ + ...obj, + }); +} + +/** + *
<p>The operation did not succeed because an internal error occurred. Try again later.</p>
+ */ +export interface InternalServerException extends __SmithyException, $MetadataBearer { + name: "InternalServerException"; + $fault: "server"; + /** + *
<p>A description of which internal error occurred.</p>
+ */ + ErrorCode?: string; + + Message?: string; +} + +export namespace InternalServerException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InternalServerException): any => ({ + ...obj, + }); +} + +/** + *
<p>The operation did not succeed because a validation error occurred.</p>
+ */ +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; + $fault: "client"; + /** + *
<p>A description of what caused the validation error.</p>
+ */ + ErrorCode?: string; + + Message?: string; +} + +export namespace ValidationException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); +} + +export enum GatewayType { + BACKUP_VM = "BACKUP_VM", +} + +/** + *
<p>A key-value pair you can use to manage, filter, and search for your resources. Allowed + * characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ : + * /.</p>
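+ * <p>For example, a hypothetical tag that satisfies these constraints:</p>
+ * ```javascript
+ * const tag = { Key: "backup-window", Value: "nightly" };
+ * ```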
+ */ +export interface Tag { + /** + *
<p>The key part of a tag's key-value pair. The key can't start with aws:.</p>
+ */ + Key: string | undefined; + + /** + *
<p>The value part of a tag's key-value pair.</p>
+ */ + Value: string | undefined; +} + +export namespace Tag { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Tag): any => ({ + ...obj, + }); +} + +export interface CreateGatewayInput { + /** + *
<p>The activation key of the created gateway.</p>
+ */ + ActivationKey: string | undefined; + + /** + *
<p>The display name of the created gateway.</p>
+ */ + GatewayDisplayName: string | undefined; + + /** + *
<p>The type of created gateway.</p>
+ */ + GatewayType: GatewayType | string | undefined; + + /** + *
<p>A list of up to 50 tags to assign to the gateway. Each tag is a key-value pair.</p>
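+ * <p>A hypothetical <code>CreateGatewayInput</code> sketch (the activation key and names are placeholders; <code>BACKUP_VM</code> is the only gateway type defined in this model):</p>
+ * ```javascript
+ * const input = {
+ *   ActivationKey: "ACTIVATION-KEY-EXAMPLE",
+ *   GatewayDisplayName: "on-prem-backup-gateway",
+ *   GatewayType: "BACKUP_VM",
+ *   Tags: [{ Key: "env", Value: "prod" }],
+ * };
+ * ```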
+ */ + Tags?: Tag[]; +} + +export namespace CreateGatewayInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateGatewayInput): any => ({ + ...obj, + }); +} + +export interface CreateGatewayOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway you create.</p>
+ */ + GatewayArn?: string; +} + +export namespace CreateGatewayOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateGatewayOutput): any => ({ + ...obj, + }); +} + +export interface DeleteGatewayInput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway to delete.</p>
+ */ + GatewayArn: string | undefined; +} + +export namespace DeleteGatewayInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteGatewayInput): any => ({ + ...obj, + }); +} + +export interface DeleteGatewayOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway you deleted.</p>
+ */ + GatewayArn?: string; +} + +export namespace DeleteGatewayOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteGatewayOutput): any => ({ + ...obj, + }); +} + +/** + *
<p>A resource that is required for the action wasn't found.</p>
+ */ +export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { + name: "ResourceNotFoundException"; + $fault: "client"; + /** + *
<p>A description of which resource wasn't found.</p>
+ */ + ErrorCode?: string; + + Message?: string; +} + +export namespace ResourceNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ + ...obj, + }); +} + +export interface DisassociateGatewayFromServerInput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway to disassociate.</p>
+ */ + GatewayArn: string | undefined; +} + +export namespace DisassociateGatewayFromServerInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateGatewayFromServerInput): any => ({ + ...obj, + }); +} + +export interface DisassociateGatewayFromServerOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway you disassociated.</p>
+ */ + GatewayArn?: string; +} + +export namespace DisassociateGatewayFromServerOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateGatewayFromServerOutput): any => ({ + ...obj, + }); +} + +export interface ListGatewaysInput { + /** + *
<p>The maximum number of gateways to list.</p>
+ */ + MaxResults?: number; + + /** + *
<p>The next item following a partial list of returned resources. For example, if a request is + * made to return MaxResults number of resources, NextToken allows you + * to return more items in your list starting at the location pointed to by the next + * token.</p>
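+ * <p>A minimal pagination sketch using the <code>paginateListGateways</code> helper added in this package (assumes a configured <code>BackupGatewayClient</code> named <code>client</code>; the page size is a placeholder):</p>
+ * ```javascript
+ * // yields one ListGateways page per iteration until NextToken is exhausted
+ * for await (const page of paginateListGateways({ client, pageSize: 25 }, {})) {
+ *   console.log(page.Gateways);
+ * }
+ * ```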
+ */ + NextToken?: string; +} + +export namespace ListGatewaysInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListGatewaysInput): any => ({ + ...obj, + }); +} + +/** + *
<p>A gateway is a Backup Gateway appliance that runs on the customer's network + * to provide seamless connectivity to backup storage in the Amazon Web Services Cloud.</p>
+ */ +export interface Gateway { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation + * to return a list of gateways for your account and Amazon Web Services Region.</p>
+ */ + GatewayArn?: string; + + /** + *
<p>The display name of the gateway.</p>
+ */ + GatewayDisplayName?: string; + + /** + *
<p>The type of the gateway.</p>
+ */ + GatewayType?: GatewayType | string; + + /** + *
<p>The hypervisor ID of the gateway.</p>
+ */ + HypervisorId?: string; + + /** + *
<p>The last time Backup gateway communicated with the gateway, in Unix format and + * UTC time.</p>
+ */ + LastSeenTime?: Date; +} + +export namespace Gateway { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Gateway): any => ({ + ...obj, + }); +} + +export interface ListGatewaysOutput { + /** + *
<p>A list of your gateways.</p>
+ */ + Gateways?: Gateway[]; + + /** + *
<p>The next item following a partial list of returned resources. For example, if a request is + * made to return maxResults number of resources, NextToken allows you + * to return more items in your list starting at the location pointed to by the next + * token.</p>
+ */ + NextToken?: string; +} + +export namespace ListGatewaysOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListGatewaysOutput): any => ({ + ...obj, + }); +} + +export interface PutMaintenanceStartTimeInput { + /** + *
<p>The Amazon Resource Name (ARN) for the gateway, used to specify its maintenance start + * time.</p>
+ */ + GatewayArn: string | undefined; + + /** + *
<p>The hour of the day to start maintenance on a gateway.</p>
+ */ + HourOfDay: number | undefined; + + /** + *
<p>The minute of the hour to start maintenance on a gateway.</p>
+ */ + MinuteOfHour: number | undefined; + + /** + *
<p>The day of the week to start maintenance on a gateway.</p>
+ */ + DayOfWeek?: number; + + /** + *
<p>The day of the month to start maintenance on a gateway.</p>
+ * <p>Valid values range from Sunday to Saturday.</p>
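+ * <p>A hypothetical <code>PutMaintenanceStartTimeInput</code> sketch (the ARN is a placeholder and the schedule values are illustrative only):</p>
+ * ```javascript
+ * const input = {
+ *   GatewayArn: "arn:aws:backup-gateway:us-east-1:111122223333:gateway/GW-EXAMPLE",
+ *   HourOfDay: 2,
+ *   MinuteOfHour: 30,
+ *   DayOfWeek: 1,
+ * };
+ * ```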
+ */ + DayOfMonth?: number; +} + +export namespace PutMaintenanceStartTimeInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutMaintenanceStartTimeInput): any => ({ + ...obj, + }); +} + +export interface PutMaintenanceStartTimeOutput { + /** + *
<p>The Amazon Resource Name (ARN) of a gateway for which you set the maintenance start + * time.</p>
+ */ + GatewayArn?: string; +} + +export namespace PutMaintenanceStartTimeOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutMaintenanceStartTimeOutput): any => ({ + ...obj, + }); +} + +export interface TestHypervisorConfigurationInput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway to the hypervisor to test.</p>
+ */ + GatewayArn: string | undefined; + + /** + *
<p>The server host of the hypervisor. This can be either an IP address or a fully-qualified + * domain name (FQDN).</p>
+ */ + Host: string | undefined; + + /** + *
<p>The username for the hypervisor.</p>
+ */ + Username?: string; + + /** + *
<p>The password for the hypervisor.</p>
+ */ + Password?: string; +} + +export namespace TestHypervisorConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TestHypervisorConfigurationInput): any => ({ + ...obj, + ...(obj.Username && { Username: SENSITIVE_STRING }), + ...(obj.Password && { Password: SENSITIVE_STRING }), + }); +} + +export interface TestHypervisorConfigurationOutput {} + +export namespace TestHypervisorConfigurationOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TestHypervisorConfigurationOutput): any => ({ + ...obj, + }); +} + +export interface UpdateGatewayInformationInput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway to update.</p>
+ */ + GatewayArn: string | undefined; + + /** + *
<p>The updated display name of the gateway.</p>
+ */ + GatewayDisplayName?: string; +} + +export namespace UpdateGatewayInformationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateGatewayInformationInput): any => ({ + ...obj, + }); +} + +export interface UpdateGatewayInformationOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the gateway you updated.</p>
+ */ + GatewayArn?: string; +} + +export namespace UpdateGatewayInformationOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateGatewayInformationOutput): any => ({ + ...obj, + }); +} + +export interface DeleteHypervisorInput { + /** + *
<p>The Amazon Resource Name (ARN) of the hypervisor to delete.</p>
+ */ + HypervisorArn: string | undefined; +} + +export namespace DeleteHypervisorInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteHypervisorInput): any => ({ + ...obj, + }); +} + +export interface DeleteHypervisorOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the hypervisor you deleted.</p>
+ */ + HypervisorArn?: string; +} + +export namespace DeleteHypervisorOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteHypervisorOutput): any => ({ + ...obj, + }); +} + +export interface ImportHypervisorConfigurationInput { + /** + *
<p>The name of the hypervisor.</p>
+ */ + Name: string | undefined; + + /** + *
<p>The server host of the hypervisor. This can be either an IP address or a fully-qualified + * domain name (FQDN).</p>
+ */ + Host: string | undefined; + + /** + *
<p>The username for the hypervisor.</p>
+ */ + Username?: string; + + /** + *
<p>The password for the hypervisor.</p>
+ */ + Password?: string; + + /** + *
<p>The Amazon Resource Name (ARN) of the Key Management Service key for the hypervisor.</p>
+ */ + KmsKeyArn?: string; + + /** + *
<p>The tags of the hypervisor configuration to import.</p>
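+ * <p>A hypothetical <code>ImportHypervisorConfigurationInput</code> sketch (all values are placeholders; <code>Username</code> and <code>Password</code> are masked as sensitive in this client's logs):</p>
+ * ```javascript
+ * const input = {
+ *   Name: "on-prem-vcenter",
+ *   Host: "vcenter.example.com", // IP address or FQDN
+ *   Username: "vsphere-admin",
+ *   Password: "placeholder-password",
+ *   Tags: [{ Key: "env", Value: "prod" }],
+ * };
+ * ```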
+ */ + Tags?: Tag[]; +} + +export namespace ImportHypervisorConfigurationInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImportHypervisorConfigurationInput): any => ({ + ...obj, + ...(obj.Username && { Username: SENSITIVE_STRING }), + ...(obj.Password && { Password: SENSITIVE_STRING }), + }); +} + +export interface ImportHypervisorConfigurationOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the hypervisor you imported.</p>
+ */ + HypervisorArn?: string; +} + +export namespace ImportHypervisorConfigurationOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImportHypervisorConfigurationOutput): any => ({ + ...obj, + }); +} + +export interface ListHypervisorsInput { + /** + *
<p>The maximum number of hypervisors to list.</p>
+ */ + MaxResults?: number; + + /** + *
<p>The next item following a partial list of returned resources. For example, if a request is + * made to return maxResults number of resources, NextToken allows you + * to return more items in your list starting at the location pointed to by the next + * token.</p>
+ */ + NextToken?: string; +} + +export namespace ListHypervisorsInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListHypervisorsInput): any => ({ + ...obj, + }); +} + +export enum HypervisorState { + ERROR = "ERROR", + OFFLINE = "OFFLINE", + ONLINE = "ONLINE", + PENDING = "PENDING", +} + +/** + *
<p>Represents the hypervisor's permissions to which the gateway will connect.</p>
+ * <p>A hypervisor is hardware, software, or firmware that creates and manages virtual machines, + * and allocates resources to them.</p>
+ */ +export interface Hypervisor { + /** + *
<p>The server host of the hypervisor. This can be either an IP address or a fully-qualified + * domain name (FQDN).</p>
+ */ + Host?: string; + + /** + *
<p>The Amazon Resource Name (ARN) of the hypervisor.</p>
+ */ + HypervisorArn?: string; + + /** + *
<p>The Amazon Resource Name (ARN) of the Key Management Service used to encrypt the + * hypervisor.</p>
+ */ + KmsKeyArn?: string; + + /** + *
<p>The name of the hypervisor.</p>
+ */ + Name?: string; + + /** + *
<p>The state of the hypervisor.</p>
+ */ + State?: HypervisorState | string; +} + +export namespace Hypervisor { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Hypervisor): any => ({ + ...obj, + }); +} + +export interface ListHypervisorsOutput { + /** + *
<p>A list of your Hypervisor objects, ordered by their Amazon Resource Names + * (ARNs).</p>
+ */ + Hypervisors?: Hypervisor[]; + + /** + *
<p>The next item following a partial list of returned resources. For example, if a request is + * made to return maxResults number of resources, NextToken allows you + * to return more items in your list starting at the location pointed to by the next + * token.</p>
+ */ + NextToken?: string; +} + +export namespace ListHypervisorsOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListHypervisorsOutput): any => ({ + ...obj, + }); +} + +export interface UpdateHypervisorInput { + /** + *
<p>The Amazon Resource Name (ARN) of the hypervisor to update.</p>
+ */ + HypervisorArn: string | undefined; + + /** + *
<p>The updated host of the hypervisor. This can be either an IP address or a fully-qualified + * domain name (FQDN).</p>
+ */ + Host?: string; + + /** + *
<p>The updated username for the hypervisor.</p>
+ */ + Username?: string; + + /** + *
<p>The updated password for the hypervisor.</p>
+ */ + Password?: string; +} + +export namespace UpdateHypervisorInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateHypervisorInput): any => ({ + ...obj, + ...(obj.Username && { Username: SENSITIVE_STRING }), + ...(obj.Password && { Password: SENSITIVE_STRING }), + }); +} + +export interface UpdateHypervisorOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the hypervisor you updated.</p>
+ */ + HypervisorArn?: string; +} + +export namespace UpdateHypervisorOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateHypervisorOutput): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceInput { + /** + *
<p>The Amazon Resource Name (ARN) of the resource whose tags you want to list.</p>
+ */ + ResourceArn: string | undefined; +} + +export namespace ListTagsForResourceInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceInput): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the resource whose tags you listed.</p>
+ */ + ResourceArn?: string; + + /** + *
<p>A list of the resource's tags.</p>
+ */ + Tags?: Tag[]; +} + +export namespace ListTagsForResourceOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceOutput): any => ({ + ...obj, + }); +} + +export interface ListVirtualMachinesInput { + /** + *
<p>The maximum number of virtual machines to list.</p>
+ */ + MaxResults?: number; + + /** + *
<p>The next item following a partial list of returned resources. For example, if a request is + * made to return maxResults number of resources, NextToken allows you + * to return more items in your list starting at the location pointed to by the next + * token.</p>
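+ * <p>A minimal manual-pagination sketch with <code>ListVirtualMachinesCommand</code> (assumes a configured <code>BackupGatewayClient</code> named <code>client</code>; the <code>MaxResults</code> value is a placeholder, and the <code>paginateListVirtualMachines</code> helper in this package performs the same loop for you):</p>
+ * ```javascript
+ * let NextToken;
+ * do {
+ *   const page = await client.send(new ListVirtualMachinesCommand({ MaxResults: 25, NextToken }));
+ *   (page.VirtualMachines || []).forEach((vm) => console.log(vm.Name));
+ *   NextToken = page.NextToken;
+ * } while (NextToken);
+ * ```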
+ */ + NextToken?: string; +} + +export namespace ListVirtualMachinesInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListVirtualMachinesInput): any => ({ + ...obj, + }); +} + +/** + *
<p>A virtual machine that is on a hypervisor.</p>
+ */ +export interface VirtualMachine { + /** + *
<p>The host name of the virtual machine.</p>
+ */ + HostName?: string; + + /** + *
<p>The ID of the virtual machine's hypervisor.</p>
+ */ + HypervisorId?: string; + + /** + *
<p>The name of the virtual machine.</p>
+ */ + Name?: string; + + /** + *
<p>The path of the virtual machine.</p>
+ */ + Path?: string; + + /** + *
<p>The Amazon Resource Name (ARN) of the virtual machine.</p>
+ */ + ResourceArn?: string; + + /** + *
<p>The most recent date a virtual machine was backed up, in Unix format and UTC time.</p>
+ */ + LastBackupDate?: Date; +} + +export namespace VirtualMachine { + /** + * @internal + */ + export const filterSensitiveLog = (obj: VirtualMachine): any => ({ + ...obj, + }); +} + +export interface ListVirtualMachinesOutput { + /** + *
<p>A list of your VirtualMachine objects, ordered by their Amazon Resource Names + * (ARNs).</p>
+ */ + VirtualMachines?: VirtualMachine[]; + + /** + *
<p>The next item following a partial list of returned resources. For example, if a request is + * made to return maxResults number of resources, NextToken allows you + * to return more items in your list starting at the location pointed to by the next + * token.</p>
+ */ + NextToken?: string; +} + +export namespace ListVirtualMachinesOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListVirtualMachinesOutput): any => ({ + ...obj, + }); +} + +export interface TagResourceInput { + /** + *
<p>The Amazon Resource Name (ARN) of the resource to tag.</p>
+ */ + ResourceARN: string | undefined; + + /** + *
<p>A list of tags to assign to the resource.</p>
+ */ + Tags: Tag[] | undefined; +} + +export namespace TagResourceInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceInput): any => ({ + ...obj, + }); +} + +export interface TagResourceOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the resource you tagged.</p>
+ */ + ResourceARN?: string; +} + +export namespace TagResourceOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceOutput): any => ({ + ...obj, + }); +} + +export interface UntagResourceInput { + /** + *
<p>The Amazon Resource Name (ARN) of the resource from which to remove tags.</p>
+ */ + ResourceARN: string | undefined; + + /** + *
<p>The list of tag keys specifying which tags to remove.</p>
+ */ + TagKeys: string[] | undefined; +} + +export namespace UntagResourceInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceInput): any => ({ + ...obj, + }); +} + +export interface UntagResourceOutput { + /** + *
<p>The Amazon Resource Name (ARN) of the resource from which you removed tags.</p>
+ */ + ResourceARN?: string; +} + +export namespace UntagResourceOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceOutput): any => ({ + ...obj, + }); +} diff --git a/clients/client-backup-gateway/src/pagination/Interfaces.ts b/clients/client-backup-gateway/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..b4153dbc5620 --- /dev/null +++ b/clients/client-backup-gateway/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { BackupGateway } from "../BackupGateway"; +import { BackupGatewayClient } from "../BackupGatewayClient"; + +export interface BackupGatewayPaginationConfiguration extends PaginationConfiguration { + client: BackupGateway | BackupGatewayClient; +} diff --git a/clients/client-backup-gateway/src/pagination/ListGatewaysPaginator.ts b/clients/client-backup-gateway/src/pagination/ListGatewaysPaginator.ts new file mode 100644 index 000000000000..b5d931c9a8ef --- /dev/null +++ b/clients/client-backup-gateway/src/pagination/ListGatewaysPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { BackupGateway } from "../BackupGateway"; +import { BackupGatewayClient } from "../BackupGatewayClient"; +import { + ListGatewaysCommand, + ListGatewaysCommandInput, + ListGatewaysCommandOutput, +} from "../commands/ListGatewaysCommand"; +import { BackupGatewayPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: BackupGatewayClient, + input: ListGatewaysCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListGatewaysCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: BackupGateway, + input: ListGatewaysCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listGateways(input, ...args); +}; +export async function* paginateListGateways( + config: BackupGatewayPaginationConfiguration, + input: ListGatewaysCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListGatewaysCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof BackupGateway) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof BackupGatewayClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected BackupGateway | BackupGatewayClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-backup-gateway/src/pagination/ListHypervisorsPaginator.ts b/clients/client-backup-gateway/src/pagination/ListHypervisorsPaginator.ts new file mode 100644 index 000000000000..762d9ac81e2b --- /dev/null +++ b/clients/client-backup-gateway/src/pagination/ListHypervisorsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { BackupGateway } from "../BackupGateway"; +import { BackupGatewayClient } from "../BackupGatewayClient"; +import { + ListHypervisorsCommand, + ListHypervisorsCommandInput, + ListHypervisorsCommandOutput, +} from "../commands/ListHypervisorsCommand"; +import { 
BackupGatewayPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: BackupGatewayClient, + input: ListHypervisorsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListHypervisorsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: BackupGateway, + input: ListHypervisorsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listHypervisors(input, ...args); +}; +export async function* paginateListHypervisors( + config: BackupGatewayPaginationConfiguration, + input: ListHypervisorsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListHypervisorsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof BackupGateway) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof BackupGatewayClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected BackupGateway | BackupGatewayClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-backup-gateway/src/pagination/ListVirtualMachinesPaginator.ts b/clients/client-backup-gateway/src/pagination/ListVirtualMachinesPaginator.ts new file mode 100644 index 000000000000..cad89e09f5f3 --- /dev/null +++ b/clients/client-backup-gateway/src/pagination/ListVirtualMachinesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { BackupGateway } from "../BackupGateway"; +import { BackupGatewayClient } from "../BackupGatewayClient"; +import { + ListVirtualMachinesCommand, + ListVirtualMachinesCommandInput, + ListVirtualMachinesCommandOutput, +} from "../commands/ListVirtualMachinesCommand"; +import { BackupGatewayPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: BackupGatewayClient, + input: ListVirtualMachinesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListVirtualMachinesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: BackupGateway, + input: ListVirtualMachinesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listVirtualMachines(input, ...args); +}; +export async function* paginateListVirtualMachines( + config: BackupGatewayPaginationConfiguration, + input: ListVirtualMachinesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListVirtualMachinesCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof BackupGateway) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof BackupGatewayClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid 
client, expected BackupGateway | BackupGatewayClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-backup-gateway/src/pagination/index.ts b/clients/client-backup-gateway/src/pagination/index.ts new file mode 100644 index 000000000000..25db32deafae --- /dev/null +++ b/clients/client-backup-gateway/src/pagination/index.ts @@ -0,0 +1,4 @@ +export * from "./Interfaces"; +export * from "./ListGatewaysPaginator"; +export * from "./ListHypervisorsPaginator"; +export * from "./ListVirtualMachinesPaginator"; diff --git a/clients/client-backup-gateway/src/protocols/Aws_json1_0.ts b/clients/client-backup-gateway/src/protocols/Aws_json1_0.ts new file mode 100644 index 000000000000..1836e17a7275 --- /dev/null +++ b/clients/client-backup-gateway/src/protocols/Aws_json1_0.ts @@ -0,0 +1,2041 @@ +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { + expectNonNull as __expectNonNull, + expectNumber as __expectNumber, + expectString as __expectString, + parseEpochTimestamp as __parseEpochTimestamp, +} from "@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + HeaderBag as __HeaderBag, + MetadataBearer as __MetadataBearer, + ResponseMetadata as __ResponseMetadata, + SerdeContext as __SerdeContext, + SmithyException as __SmithyException, +} from "@aws-sdk/types"; + +import { + AssociateGatewayToServerCommandInput, + AssociateGatewayToServerCommandOutput, +} from "../commands/AssociateGatewayToServerCommand"; +import { CreateGatewayCommandInput, CreateGatewayCommandOutput } from "../commands/CreateGatewayCommand"; +import { DeleteGatewayCommandInput, DeleteGatewayCommandOutput } from "../commands/DeleteGatewayCommand"; +import { DeleteHypervisorCommandInput, DeleteHypervisorCommandOutput } from "../commands/DeleteHypervisorCommand"; +import { + DisassociateGatewayFromServerCommandInput, + DisassociateGatewayFromServerCommandOutput, +} from "../commands/DisassociateGatewayFromServerCommand"; +import { + ImportHypervisorConfigurationCommandInput, + ImportHypervisorConfigurationCommandOutput, +} from "../commands/ImportHypervisorConfigurationCommand"; +import { ListGatewaysCommandInput, ListGatewaysCommandOutput } from "../commands/ListGatewaysCommand"; +import { ListHypervisorsCommandInput, ListHypervisorsCommandOutput } from "../commands/ListHypervisorsCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { + ListVirtualMachinesCommandInput, + ListVirtualMachinesCommandOutput, +} from "../commands/ListVirtualMachinesCommand"; +import { + PutMaintenanceStartTimeCommandInput, + PutMaintenanceStartTimeCommandOutput, +} from "../commands/PutMaintenanceStartTimeCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; +import { + TestHypervisorConfigurationCommandInput, + TestHypervisorConfigurationCommandOutput, +} from "../commands/TestHypervisorConfigurationCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { + UpdateGatewayInformationCommandInput, + UpdateGatewayInformationCommandOutput, +} from "../commands/UpdateGatewayInformationCommand"; +import { UpdateHypervisorCommandInput, UpdateHypervisorCommandOutput } from "../commands/UpdateHypervisorCommand"; +import { + AccessDeniedException, + AssociateGatewayToServerInput, + 
AssociateGatewayToServerOutput, + ConflictException, + CreateGatewayInput, + CreateGatewayOutput, + DeleteGatewayInput, + DeleteGatewayOutput, + DeleteHypervisorInput, + DeleteHypervisorOutput, + DisassociateGatewayFromServerInput, + DisassociateGatewayFromServerOutput, + Gateway, + Hypervisor, + ImportHypervisorConfigurationInput, + ImportHypervisorConfigurationOutput, + InternalServerException, + ListGatewaysInput, + ListGatewaysOutput, + ListHypervisorsInput, + ListHypervisorsOutput, + ListTagsForResourceInput, + ListTagsForResourceOutput, + ListVirtualMachinesInput, + ListVirtualMachinesOutput, + PutMaintenanceStartTimeInput, + PutMaintenanceStartTimeOutput, + ResourceNotFoundException, + Tag, + TagResourceInput, + TagResourceOutput, + TestHypervisorConfigurationInput, + TestHypervisorConfigurationOutput, + UntagResourceInput, + UntagResourceOutput, + UpdateGatewayInformationInput, + UpdateGatewayInformationOutput, + UpdateHypervisorInput, + UpdateHypervisorOutput, + ValidationException, + VirtualMachine, +} from "../models/models_0"; + +export const serializeAws_json1_0AssociateGatewayToServerCommand = async ( + input: AssociateGatewayToServerCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.AssociateGatewayToServer", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0AssociateGatewayToServerInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0CreateGatewayCommand = async ( + input: CreateGatewayCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.CreateGateway", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0CreateGatewayInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0DeleteGatewayCommand = async ( + input: DeleteGatewayCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.DeleteGateway", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0DeleteGatewayInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0DeleteHypervisorCommand = async ( + input: DeleteHypervisorCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.DeleteHypervisor", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0DeleteHypervisorInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0DisassociateGatewayFromServerCommand = async ( + input: DisassociateGatewayFromServerCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.DisassociateGatewayFromServer", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0DisassociateGatewayFromServerInput(input, context)); + return buildHttpRpcRequest(context, headers, 
"/", undefined, body); +}; + +export const serializeAws_json1_0ImportHypervisorConfigurationCommand = async ( + input: ImportHypervisorConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.ImportHypervisorConfiguration", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ImportHypervisorConfigurationInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ListGatewaysCommand = async ( + input: ListGatewaysCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.ListGateways", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ListGatewaysInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ListHypervisorsCommand = async ( + input: ListHypervisorsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.ListHypervisors", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ListHypervisorsInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ListTagsForResourceCommand = async ( + input: ListTagsForResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.ListTagsForResource", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ListTagsForResourceInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0ListVirtualMachinesCommand = async ( + input: ListVirtualMachinesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.ListVirtualMachines", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0ListVirtualMachinesInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0PutMaintenanceStartTimeCommand = async ( + input: PutMaintenanceStartTimeCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.PutMaintenanceStartTime", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0PutMaintenanceStartTimeInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0TagResourceCommand = async ( + input: TagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.TagResource", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0TagResourceInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export 
const serializeAws_json1_0TestHypervisorConfigurationCommand = async ( + input: TestHypervisorConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.TestHypervisorConfiguration", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0TestHypervisorConfigurationInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0UntagResourceCommand = async ( + input: UntagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.UntagResource", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0UntagResourceInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0UpdateGatewayInformationCommand = async ( + input: UpdateGatewayInformationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.UpdateGatewayInformation", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0UpdateGatewayInformationInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_0UpdateHypervisorCommand = async ( + input: UpdateHypervisorCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "BackupOnPremises_v20210101.UpdateHypervisor", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0UpdateHypervisorInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const deserializeAws_json1_0AssociateGatewayToServerCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0AssociateGatewayToServerCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0AssociateGatewayToServerOutput(data, context); + const response: AssociateGatewayToServerCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0AssociateGatewayToServerCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.backupgateway#ConflictException": + response = { + ...(await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + 
$metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0CreateGatewayCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0CreateGatewayCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0CreateGatewayOutput(data, context); + const response: CreateGatewayCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0CreateGatewayCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0DeleteGatewayCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0DeleteGatewayCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0DeleteGatewayOutput(data, context); + const response: DeleteGatewayCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0DeleteGatewayCommandError = async ( + output: __HttpResponse, + context: __SerdeContext 
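/*
 * Illustrative note: this and the other <Operation>CommandError handlers all follow one pattern --
 * the response body is re-parsed as JSON, loadRestJsonErrorCode (defined near the end of this
 * file) resolves the modeled error name from the "x-amzn-errortype" header or the body, and the
 * switch builds the matching exception shape (AccessDeniedException, ConflictException,
 * InternalServerException, ResourceNotFoundException or ValidationException, depending on which
 * errors the operation declares), falling back to a generic client-fault error otherwise.
 */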
+): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0DeleteHypervisorCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0DeleteHypervisorCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0DeleteHypervisorOutput(data, context); + const response: DeleteHypervisorCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0DeleteHypervisorCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.backupgateway#AccessDeniedException": + response = { + ...(await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case 
"com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0DisassociateGatewayFromServerCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0DisassociateGatewayFromServerCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0DisassociateGatewayFromServerOutput(data, context); + const response: DisassociateGatewayFromServerCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0DisassociateGatewayFromServerCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.backupgateway#ConflictException": + response = { + ...(await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0ImportHypervisorConfigurationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): 
Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0ImportHypervisorConfigurationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0ImportHypervisorConfigurationOutput(data, context); + const response: ImportHypervisorConfigurationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0ImportHypervisorConfigurationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.backupgateway#AccessDeniedException": + response = { + ...(await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0ListGatewaysCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0ListGatewaysCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0ListGatewaysOutput(data, context); + const response: ListGatewaysCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0ListGatewaysCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case 
"com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0ListHypervisorsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0ListHypervisorsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0ListHypervisorsOutput(data, context); + const response: ListHypervisorsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0ListHypervisorsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0ListTagsForResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0ListTagsForResourceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0ListTagsForResourceOutput(data, context); + const response: ListTagsForResourceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0ListTagsForResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + 
...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0ListVirtualMachinesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0ListVirtualMachinesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0ListVirtualMachinesOutput(data, context); + const response: ListVirtualMachinesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0ListVirtualMachinesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + 
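/*
 * Illustrative note: the error message is normalized onto the lower-case `message` field and the
 * deserialized shape is rejected as a real Error, so callers can branch on `name`, `$fault` and
 * `$metadata`. A sketch of typical caller-side handling (not generated code):
 *
 *   try {
 *     await client.send(new ListVirtualMachinesCommand({}));
 *   } catch (e) {
 *     if (e.name === "ValidationException") {
 *       // invalid input; e.$metadata.requestId identifies the failed request
 *     }
 *   }
 */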
response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0PutMaintenanceStartTimeCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0PutMaintenanceStartTimeCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0PutMaintenanceStartTimeOutput(data, context); + const response: PutMaintenanceStartTimeCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0PutMaintenanceStartTimeCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.backupgateway#ConflictException": + response = { + ...(await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0TagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0TagResourceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0TagResourceOutput(data, context); + const response: TagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0TagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let 
response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0TestHypervisorConfigurationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0TestHypervisorConfigurationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0TestHypervisorConfigurationOutput(data, context); + const response: TestHypervisorConfigurationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0TestHypervisorConfigurationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.backupgateway#ConflictException": + response = { + ...(await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await 
deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0UntagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0UntagResourceCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0UntagResourceOutput(data, context); + const response: UntagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0UntagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0UpdateGatewayInformationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0UpdateGatewayInformationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0UpdateGatewayInformationOutput(data, context); + const response: UpdateGatewayInformationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; 
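/*
 * Illustrative note: on the success path the parsed JSON body is deserialized into the modeled
 * output shape and spread next to $metadata, so the resolved UpdateGatewayInformationCommandOutput
 * carries both the service's response fields (here, GatewayArn) and request metadata such as the
 * request id and HTTP status code.
 */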
+ return Promise.resolve(response); +}; + +const deserializeAws_json1_0UpdateGatewayInformationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.backupgateway#ConflictException": + response = { + ...(await deserializeAws_json1_0ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0UpdateHypervisorCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0UpdateHypervisorCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0UpdateHypervisorOutput(data, context); + const response: UpdateHypervisorCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0UpdateHypervisorCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.backupgateway#AccessDeniedException": + response = { + ...(await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.backupgateway#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, 
context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.backupgateway#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.backupgateway#ValidationException": + response = { + ...(await deserializeAws_json1_0ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_json1_0AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0AccessDeniedException(body, context); + const contents: AccessDeniedException = { + name: "AccessDeniedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_0ConflictExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0ConflictException(body, context); + const contents: ConflictException = { + name: "ConflictException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_0InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0InternalServerException(body, context); + const contents: InternalServerException = { + name: "InternalServerException", + $fault: "server", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_0ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0ResourceNotFoundException(body, context); + const contents: ResourceNotFoundException = { + name: "ResourceNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_0ValidationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_0ValidationException(body, context); + const contents: ValidationException = { + name: "ValidationException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const serializeAws_json1_0AssociateGatewayToServerInput = ( + input: AssociateGatewayToServerInput, + context: __SerdeContext +): any => { + return { + 
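/*
 * Illustrative note: the input serializers below all use the same conditional-spread idiom -- a
 * member is copied into the JSON body only when it is defined and non-null, so optional members
 * are omitted rather than sent as null. For example (sketch with a placeholder ARN):
 *
 *   serializeAws_json1_0AssociateGatewayToServerInput({ GatewayArn: "<gateway ARN>" }, context)
 *   // => { GatewayArn: "<gateway ARN>" }   (ServerArn is simply left out)
 */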
...(input.GatewayArn !== undefined && input.GatewayArn !== null && { GatewayArn: input.GatewayArn }), + ...(input.ServerArn !== undefined && input.ServerArn !== null && { ServerArn: input.ServerArn }), + }; +}; + +const serializeAws_json1_0CreateGatewayInput = (input: CreateGatewayInput, context: __SerdeContext): any => { + return { + ...(input.ActivationKey !== undefined && input.ActivationKey !== null && { ActivationKey: input.ActivationKey }), + ...(input.GatewayDisplayName !== undefined && + input.GatewayDisplayName !== null && { GatewayDisplayName: input.GatewayDisplayName }), + ...(input.GatewayType !== undefined && input.GatewayType !== null && { GatewayType: input.GatewayType }), + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_json1_0Tags(input.Tags, context) }), + }; +}; + +const serializeAws_json1_0DeleteGatewayInput = (input: DeleteGatewayInput, context: __SerdeContext): any => { + return { + ...(input.GatewayArn !== undefined && input.GatewayArn !== null && { GatewayArn: input.GatewayArn }), + }; +}; + +const serializeAws_json1_0DeleteHypervisorInput = (input: DeleteHypervisorInput, context: __SerdeContext): any => { + return { + ...(input.HypervisorArn !== undefined && input.HypervisorArn !== null && { HypervisorArn: input.HypervisorArn }), + }; +}; + +const serializeAws_json1_0DisassociateGatewayFromServerInput = ( + input: DisassociateGatewayFromServerInput, + context: __SerdeContext +): any => { + return { + ...(input.GatewayArn !== undefined && input.GatewayArn !== null && { GatewayArn: input.GatewayArn }), + }; +}; + +const serializeAws_json1_0ImportHypervisorConfigurationInput = ( + input: ImportHypervisorConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.Host !== undefined && input.Host !== null && { Host: input.Host }), + ...(input.KmsKeyArn !== undefined && input.KmsKeyArn !== null && { KmsKeyArn: input.KmsKeyArn }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.Password !== undefined && input.Password !== null && { Password: input.Password }), + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_json1_0Tags(input.Tags, context) }), + ...(input.Username !== undefined && input.Username !== null && { Username: input.Username }), + }; +}; + +const serializeAws_json1_0ListGatewaysInput = (input: ListGatewaysInput, context: __SerdeContext): any => { + return { + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }; +}; + +const serializeAws_json1_0ListHypervisorsInput = (input: ListHypervisorsInput, context: __SerdeContext): any => { + return { + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }; +}; + +const serializeAws_json1_0ListTagsForResourceInput = ( + input: ListTagsForResourceInput, + context: __SerdeContext +): any => { + return { + ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), + }; +}; + +const serializeAws_json1_0ListVirtualMachinesInput = ( + input: ListVirtualMachinesInput, + context: __SerdeContext +): any => { + return { + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && 
input.NextToken !== null && { NextToken: input.NextToken }), + }; +}; + +const serializeAws_json1_0PutMaintenanceStartTimeInput = ( + input: PutMaintenanceStartTimeInput, + context: __SerdeContext +): any => { + return { + ...(input.DayOfMonth !== undefined && input.DayOfMonth !== null && { DayOfMonth: input.DayOfMonth }), + ...(input.DayOfWeek !== undefined && input.DayOfWeek !== null && { DayOfWeek: input.DayOfWeek }), + ...(input.GatewayArn !== undefined && input.GatewayArn !== null && { GatewayArn: input.GatewayArn }), + ...(input.HourOfDay !== undefined && input.HourOfDay !== null && { HourOfDay: input.HourOfDay }), + ...(input.MinuteOfHour !== undefined && input.MinuteOfHour !== null && { MinuteOfHour: input.MinuteOfHour }), + }; +}; + +const serializeAws_json1_0Tag = (input: Tag, context: __SerdeContext): any => { + return { + ...(input.Key !== undefined && input.Key !== null && { Key: input.Key }), + ...(input.Value !== undefined && input.Value !== null && { Value: input.Value }), + }; +}; + +const serializeAws_json1_0TagKeys = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_0TagResourceInput = (input: TagResourceInput, context: __SerdeContext): any => { + return { + ...(input.ResourceARN !== undefined && input.ResourceARN !== null && { ResourceARN: input.ResourceARN }), + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_json1_0Tags(input.Tags, context) }), + }; +}; + +const serializeAws_json1_0Tags = (input: Tag[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_0Tag(entry, context); + }); +}; + +const serializeAws_json1_0TestHypervisorConfigurationInput = ( + input: TestHypervisorConfigurationInput, + context: __SerdeContext +): any => { + return { + ...(input.GatewayArn !== undefined && input.GatewayArn !== null && { GatewayArn: input.GatewayArn }), + ...(input.Host !== undefined && input.Host !== null && { Host: input.Host }), + ...(input.Password !== undefined && input.Password !== null && { Password: input.Password }), + ...(input.Username !== undefined && input.Username !== null && { Username: input.Username }), + }; +}; + +const serializeAws_json1_0UntagResourceInput = (input: UntagResourceInput, context: __SerdeContext): any => { + return { + ...(input.ResourceARN !== undefined && input.ResourceARN !== null && { ResourceARN: input.ResourceARN }), + ...(input.TagKeys !== undefined && + input.TagKeys !== null && { TagKeys: serializeAws_json1_0TagKeys(input.TagKeys, context) }), + }; +}; + +const serializeAws_json1_0UpdateGatewayInformationInput = ( + input: UpdateGatewayInformationInput, + context: __SerdeContext +): any => { + return { + ...(input.GatewayArn !== undefined && input.GatewayArn !== null && { GatewayArn: input.GatewayArn }), + ...(input.GatewayDisplayName !== undefined && + input.GatewayDisplayName !== null && { GatewayDisplayName: input.GatewayDisplayName }), + }; +}; + +const serializeAws_json1_0UpdateHypervisorInput = (input: UpdateHypervisorInput, context: __SerdeContext): any => { + return { + ...(input.Host !== undefined && input.Host !== null && { Host: input.Host }), + ...(input.HypervisorArn !== undefined && input.HypervisorArn !== null && { HypervisorArn: input.HypervisorArn }), + ...(input.Password !== undefined && 
input.Password !== null && { Password: input.Password }), + ...(input.Username !== undefined && input.Username !== null && { Username: input.Username }), + }; +}; + +const deserializeAws_json1_0AccessDeniedException = (output: any, context: __SerdeContext): AccessDeniedException => { + return { + ErrorCode: __expectString(output.ErrorCode), + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_0AssociateGatewayToServerOutput = ( + output: any, + context: __SerdeContext +): AssociateGatewayToServerOutput => { + return { + GatewayArn: __expectString(output.GatewayArn), + } as any; +}; + +const deserializeAws_json1_0ConflictException = (output: any, context: __SerdeContext): ConflictException => { + return { + ErrorCode: __expectString(output.ErrorCode), + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_0CreateGatewayOutput = (output: any, context: __SerdeContext): CreateGatewayOutput => { + return { + GatewayArn: __expectString(output.GatewayArn), + } as any; +}; + +const deserializeAws_json1_0DeleteGatewayOutput = (output: any, context: __SerdeContext): DeleteGatewayOutput => { + return { + GatewayArn: __expectString(output.GatewayArn), + } as any; +}; + +const deserializeAws_json1_0DeleteHypervisorOutput = (output: any, context: __SerdeContext): DeleteHypervisorOutput => { + return { + HypervisorArn: __expectString(output.HypervisorArn), + } as any; +}; + +const deserializeAws_json1_0DisassociateGatewayFromServerOutput = ( + output: any, + context: __SerdeContext +): DisassociateGatewayFromServerOutput => { + return { + GatewayArn: __expectString(output.GatewayArn), + } as any; +}; + +const deserializeAws_json1_0Gateway = (output: any, context: __SerdeContext): Gateway => { + return { + GatewayArn: __expectString(output.GatewayArn), + GatewayDisplayName: __expectString(output.GatewayDisplayName), + GatewayType: __expectString(output.GatewayType), + HypervisorId: __expectString(output.HypervisorId), + LastSeenTime: + output.LastSeenTime !== undefined && output.LastSeenTime !== null + ? 
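/*
 * Illustrative note: timestamps such as LastSeenTime here (and LastBackupDate on VirtualMachine
 * below) arrive as epoch-seconds numbers in the JSON body; __parseEpochTimestamp converts them to
 * JavaScript Date objects, with __expectNumber and __expectNonNull guarding against non-numeric
 * or null values.
 */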
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastSeenTime))) + : undefined, + } as any; +}; + +const deserializeAws_json1_0Gateways = (output: any, context: __SerdeContext): Gateway[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0Gateway(entry, context); + }); +}; + +const deserializeAws_json1_0Hypervisor = (output: any, context: __SerdeContext): Hypervisor => { + return { + Host: __expectString(output.Host), + HypervisorArn: __expectString(output.HypervisorArn), + KmsKeyArn: __expectString(output.KmsKeyArn), + Name: __expectString(output.Name), + State: __expectString(output.State), + } as any; +}; + +const deserializeAws_json1_0Hypervisors = (output: any, context: __SerdeContext): Hypervisor[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0Hypervisor(entry, context); + }); +}; + +const deserializeAws_json1_0ImportHypervisorConfigurationOutput = ( + output: any, + context: __SerdeContext +): ImportHypervisorConfigurationOutput => { + return { + HypervisorArn: __expectString(output.HypervisorArn), + } as any; +}; + +const deserializeAws_json1_0InternalServerException = ( + output: any, + context: __SerdeContext +): InternalServerException => { + return { + ErrorCode: __expectString(output.ErrorCode), + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_0ListGatewaysOutput = (output: any, context: __SerdeContext): ListGatewaysOutput => { + return { + Gateways: + output.Gateways !== undefined && output.Gateways !== null + ? deserializeAws_json1_0Gateways(output.Gateways, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + +const deserializeAws_json1_0ListHypervisorsOutput = (output: any, context: __SerdeContext): ListHypervisorsOutput => { + return { + Hypervisors: + output.Hypervisors !== undefined && output.Hypervisors !== null + ? deserializeAws_json1_0Hypervisors(output.Hypervisors, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + +const deserializeAws_json1_0ListTagsForResourceOutput = ( + output: any, + context: __SerdeContext +): ListTagsForResourceOutput => { + return { + ResourceArn: __expectString(output.ResourceArn), + Tags: + output.Tags !== undefined && output.Tags !== null ? deserializeAws_json1_0Tags(output.Tags, context) : undefined, + } as any; +}; + +const deserializeAws_json1_0ListVirtualMachinesOutput = ( + output: any, + context: __SerdeContext +): ListVirtualMachinesOutput => { + return { + NextToken: __expectString(output.NextToken), + VirtualMachines: + output.VirtualMachines !== undefined && output.VirtualMachines !== null + ? 
deserializeAws_json1_0VirtualMachines(output.VirtualMachines, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_0PutMaintenanceStartTimeOutput = ( + output: any, + context: __SerdeContext +): PutMaintenanceStartTimeOutput => { + return { + GatewayArn: __expectString(output.GatewayArn), + } as any; +}; + +const deserializeAws_json1_0ResourceNotFoundException = ( + output: any, + context: __SerdeContext +): ResourceNotFoundException => { + return { + ErrorCode: __expectString(output.ErrorCode), + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_0Tag = (output: any, context: __SerdeContext): Tag => { + return { + Key: __expectString(output.Key), + Value: __expectString(output.Value), + } as any; +}; + +const deserializeAws_json1_0TagResourceOutput = (output: any, context: __SerdeContext): TagResourceOutput => { + return { + ResourceARN: __expectString(output.ResourceARN), + } as any; +}; + +const deserializeAws_json1_0Tags = (output: any, context: __SerdeContext): Tag[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0Tag(entry, context); + }); +}; + +const deserializeAws_json1_0TestHypervisorConfigurationOutput = ( + output: any, + context: __SerdeContext +): TestHypervisorConfigurationOutput => { + return {} as any; +}; + +const deserializeAws_json1_0UntagResourceOutput = (output: any, context: __SerdeContext): UntagResourceOutput => { + return { + ResourceARN: __expectString(output.ResourceARN), + } as any; +}; + +const deserializeAws_json1_0UpdateGatewayInformationOutput = ( + output: any, + context: __SerdeContext +): UpdateGatewayInformationOutput => { + return { + GatewayArn: __expectString(output.GatewayArn), + } as any; +}; + +const deserializeAws_json1_0UpdateHypervisorOutput = (output: any, context: __SerdeContext): UpdateHypervisorOutput => { + return { + HypervisorArn: __expectString(output.HypervisorArn), + } as any; +}; + +const deserializeAws_json1_0ValidationException = (output: any, context: __SerdeContext): ValidationException => { + return { + ErrorCode: __expectString(output.ErrorCode), + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_0VirtualMachine = (output: any, context: __SerdeContext): VirtualMachine => { + return { + HostName: __expectString(output.HostName), + HypervisorId: __expectString(output.HypervisorId), + LastBackupDate: + output.LastBackupDate !== undefined && output.LastBackupDate !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastBackupDate))) + : undefined, + Name: __expectString(output.Name), + Path: __expectString(output.Path), + ResourceArn: __expectString(output.ResourceArn), + } as any; +}; + +const deserializeAws_json1_0VirtualMachines = (output: any, context: __SerdeContext): VirtualMachine[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0VirtualMachine(entry, context); + }); +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. 
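/*
 * Illustrative note: the helpers below turn the transport-level response into JSON -- collectBody
 * drains the (possibly streaming) payload into a Uint8Array via context.streamCollector,
 * collectBodyString turns those bytes into a UTF-8 string, and parseBody JSON-parses non-empty
 * payloads (an empty body becomes {}). loadRestJsonErrorCode, defined after them, resolves the
 * error identifier by checking the "x-amzn-errortype" header, then a "code" field, then "__type"
 * in the body, dropping anything after ":" and keeping the part after "#". For example (sketch):
 *
 *   { "__type": "com.amazonaws.backupgateway#ValidationException", "Message": "..." }
 *   // resolves to the error code "ValidationException"
 */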
+const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. +const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const buildHttpRpcRequest = async ( + context: __SerdeContext, + headers: __HeaderBag, + path: string, + resolvedHostname: string | undefined, + body: any +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const contents: any = { + protocol, + hostname, + port, + method: "POST", + path: basePath.endsWith("/") ? basePath.slice(0, -1) + path : basePath + path, + headers, + }; + if (resolvedHostname !== undefined) { + contents.hostname = resolvedHostname; + } + if (body !== undefined) { + contents.body = body; + } + return new __HttpRequest(contents); +}; + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. + */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-backup-gateway/src/runtimeConfig.browser.ts b/clients/client-backup-gateway/src/runtimeConfig.browser.ts new file mode 100644 index 000000000000..1d18e4f404b0 --- /dev/null +++ b/clients/client-backup-gateway/src/runtimeConfig.browser.ts @@ -0,0 +1,44 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { Sha256 } from "@aws-crypto/sha256-browser"; +import { DEFAULT_USE_DUALSTACK_ENDPOINT, DEFAULT_USE_FIPS_ENDPOINT } from "@aws-sdk/config-resolver"; +import { FetchHttpHandler, streamCollector } from "@aws-sdk/fetch-http-handler"; +import { invalidProvider } from "@aws-sdk/invalid-dependency"; +import { DEFAULT_MAX_ATTEMPTS, DEFAULT_RETRY_MODE } from "@aws-sdk/middleware-retry"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-browser"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-browser"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-browser"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-browser"; +import { BackupGatewayClientConfig } from "./BackupGatewayClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: 
BackupGatewayClientConfig) => { + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "browser", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? ((_: unknown) => () => Promise.reject(new Error("Credential is missing"))), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? DEFAULT_MAX_ATTEMPTS, + region: config?.region ?? invalidProvider("Region is missing"), + requestHandler: config?.requestHandler ?? new FetchHttpHandler(), + retryMode: config?.retryMode ?? (() => Promise.resolve(DEFAULT_RETRY_MODE)), + sha256: config?.sha256 ?? Sha256, + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? (() => Promise.resolve(DEFAULT_USE_DUALSTACK_ENDPOINT)), + useFipsEndpoint: config?.useFipsEndpoint ?? (() => Promise.resolve(DEFAULT_USE_FIPS_ENDPOINT)), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? toUtf8, + }; +}; diff --git a/clients/client-backup-gateway/src/runtimeConfig.native.ts b/clients/client-backup-gateway/src/runtimeConfig.native.ts new file mode 100644 index 000000000000..0bd46032f502 --- /dev/null +++ b/clients/client-backup-gateway/src/runtimeConfig.native.ts @@ -0,0 +1,17 @@ +import { Sha256 } from "@aws-crypto/sha256-js"; + +import { BackupGatewayClientConfig } from "./BackupGatewayClient"; +import { getRuntimeConfig as getBrowserRuntimeConfig } from "./runtimeConfig.browser"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: BackupGatewayClientConfig) => { + const browserDefaults = getBrowserRuntimeConfig(config); + return { + ...browserDefaults, + ...config, + runtime: "react-native", + sha256: config?.sha256 ?? Sha256, + }; +}; diff --git a/clients/client-backup-gateway/src/runtimeConfig.shared.ts b/clients/client-backup-gateway/src/runtimeConfig.shared.ts new file mode 100644 index 000000000000..b4b610203174 --- /dev/null +++ b/clients/client-backup-gateway/src/runtimeConfig.shared.ts @@ -0,0 +1,17 @@ +import { Logger as __Logger } from "@aws-sdk/types"; +import { parseUrl } from "@aws-sdk/url-parser"; + +import { BackupGatewayClientConfig } from "./BackupGatewayClient"; +import { defaultRegionInfoProvider } from "./endpoints"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: BackupGatewayClientConfig) => ({ + apiVersion: "2021-01-01", + disableHostPrefix: config?.disableHostPrefix ?? false, + logger: config?.logger ?? ({} as __Logger), + regionInfoProvider: config?.regionInfoProvider ?? defaultRegionInfoProvider, + serviceId: config?.serviceId ?? "Backup Gateway", + urlParser: config?.urlParser ?? 
parseUrl, +}); diff --git a/clients/client-backup-gateway/src/runtimeConfig.ts b/clients/client-backup-gateway/src/runtimeConfig.ts new file mode 100644 index 000000000000..e2e1903aae26 --- /dev/null +++ b/clients/client-backup-gateway/src/runtimeConfig.ts @@ -0,0 +1,53 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { decorateDefaultCredentialProvider } from "@aws-sdk/client-sts"; +import { + NODE_REGION_CONFIG_FILE_OPTIONS, + NODE_REGION_CONFIG_OPTIONS, + NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS, + NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS, +} from "@aws-sdk/config-resolver"; +import { defaultProvider as credentialDefaultProvider } from "@aws-sdk/credential-provider-node"; +import { Hash } from "@aws-sdk/hash-node"; +import { NODE_MAX_ATTEMPT_CONFIG_OPTIONS, NODE_RETRY_MODE_CONFIG_OPTIONS } from "@aws-sdk/middleware-retry"; +import { loadConfig as loadNodeConfig } from "@aws-sdk/node-config-provider"; +import { NodeHttpHandler, streamCollector } from "@aws-sdk/node-http-handler"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-node"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-node"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-node"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-node"; +import { BackupGatewayClientConfig } from "./BackupGatewayClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { emitWarningIfUnsupportedVersion } from "@aws-sdk/smithy-client"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: BackupGatewayClientConfig) => { + emitWarningIfUnsupportedVersion(process.version); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "node", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? decorateDefaultCredentialProvider(credentialDefaultProvider), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? loadNodeConfig(NODE_MAX_ATTEMPT_CONFIG_OPTIONS), + region: config?.region ?? loadNodeConfig(NODE_REGION_CONFIG_OPTIONS, NODE_REGION_CONFIG_FILE_OPTIONS), + requestHandler: config?.requestHandler ?? new NodeHttpHandler(), + retryMode: config?.retryMode ?? loadNodeConfig(NODE_RETRY_MODE_CONFIG_OPTIONS), + sha256: config?.sha256 ?? Hash.bind(null, "sha256"), + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? loadNodeConfig(NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS), + useFipsEndpoint: config?.useFipsEndpoint ?? loadNodeConfig(NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
toUtf8, + }; +}; diff --git a/clients/client-backup-gateway/tsconfig.es.json b/clients/client-backup-gateway/tsconfig.es.json new file mode 100644 index 000000000000..4c72364cd1a0 --- /dev/null +++ b/clients/client-backup-gateway/tsconfig.es.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "es5", + "module": "esnext", + "moduleResolution": "node", + "lib": ["dom", "es5", "es2015.promise", "es2015.collection", "es2015.iterable", "es2015.symbol.wellknown"], + "outDir": "dist-es" + } +} diff --git a/clients/client-backup-gateway/tsconfig.json b/clients/client-backup-gateway/tsconfig.json new file mode 100644 index 000000000000..093039289c53 --- /dev/null +++ b/clients/client-backup-gateway/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "rootDir": "./src", + "alwaysStrict": true, + "target": "ES2018", + "module": "commonjs", + "strict": true, + "downlevelIteration": true, + "importHelpers": true, + "noEmitHelpers": true, + "incremental": true, + "resolveJsonModule": true, + "esModuleInterop": true, + "outDir": "dist-cjs", + "removeComments": true + }, + "typedocOptions": { + "exclude": ["**/node_modules/**", "**/*.spec.ts", "**/protocols/*.ts", "**/e2e/*.ts", "**/endpoints.ts"], + "excludeNotExported": true, + "excludePrivate": true, + "hideGenerator": true, + "ignoreCompilerErrors": true, + "includeDeclarations": true, + "stripInternal": true, + "readme": "README.md", + "mode": "file", + "out": "docs", + "theme": "minimal", + "plugin": ["@aws-sdk/service-client-documentation-generator"] + }, + "exclude": ["test/**/*"] +} diff --git a/clients/client-backup-gateway/tsconfig.types.json b/clients/client-backup-gateway/tsconfig.types.json new file mode 100644 index 000000000000..4c3dfa7b3d25 --- /dev/null +++ b/clients/client-backup-gateway/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "removeComments": false, + "declaration": true, + "declarationDir": "dist-types", + "emitDeclarationOnly": true + }, + "exclude": ["test/**/*", "dist-types/**/*"] +} diff --git a/clients/client-compute-optimizer/README.md b/clients/client-compute-optimizer/README.md index ed9547d3f7f5..1d6be4b55a77 100644 --- a/clients/client-compute-optimizer/README.md +++ b/clients/client-compute-optimizer/README.md @@ -34,19 +34,16 @@ using your favorite package manager: The AWS SDK is modulized by clients and commands. 
To send a request, you only need to import the `ComputeOptimizerClient` and -the commands you need, for example `DescribeRecommendationExportJobsCommand`: +the commands you need, for example `DeleteRecommendationPreferencesCommand`: ```js // ES5 example -const { - ComputeOptimizerClient, - DescribeRecommendationExportJobsCommand, -} = require("@aws-sdk/client-compute-optimizer"); +const { ComputeOptimizerClient, DeleteRecommendationPreferencesCommand } = require("@aws-sdk/client-compute-optimizer"); ``` ```ts // ES6+ example -import { ComputeOptimizerClient, DescribeRecommendationExportJobsCommand } from "@aws-sdk/client-compute-optimizer"; +import { ComputeOptimizerClient, DeleteRecommendationPreferencesCommand } from "@aws-sdk/client-compute-optimizer"; ``` ### Usage @@ -65,7 +62,7 @@ const client = new ComputeOptimizerClient({ region: "REGION" }); const params = { /** input parameters */ }; -const command = new DescribeRecommendationExportJobsCommand(params); +const command = new DeleteRecommendationPreferencesCommand(params); ``` #### Async/await @@ -144,7 +141,7 @@ const client = new AWS.ComputeOptimizer({ region: "REGION" }); // async/await. try { - const data = await client.describeRecommendationExportJobs(params); + const data = await client.deleteRecommendationPreferences(params); // process data. } catch (error) { // error handling. @@ -152,7 +149,7 @@ try { // Promises. client - .describeRecommendationExportJobs(params) + .deleteRecommendationPreferences(params) .then((data) => { // process data. }) @@ -161,7 +158,7 @@ client }); // callbacks. -client.describeRecommendationExportJobs(params, (err, data) => { +client.deleteRecommendationPreferences(params, (err, data) => { // proccess err and data. }); ``` diff --git a/clients/client-compute-optimizer/src/ComputeOptimizer.ts b/clients/client-compute-optimizer/src/ComputeOptimizer.ts index ec4d2753d765..db9042a4c286 100644 --- a/clients/client-compute-optimizer/src/ComputeOptimizer.ts +++ b/clients/client-compute-optimizer/src/ComputeOptimizer.ts @@ -1,5 +1,10 @@ import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; +import { + DeleteRecommendationPreferencesCommand, + DeleteRecommendationPreferencesCommandInput, + DeleteRecommendationPreferencesCommandOutput, +} from "./commands/DeleteRecommendationPreferencesCommand"; import { DescribeRecommendationExportJobsCommand, DescribeRecommendationExportJobsCommandInput, @@ -45,6 +50,11 @@ import { GetEC2RecommendationProjectedMetricsCommandInput, GetEC2RecommendationProjectedMetricsCommandOutput, } from "./commands/GetEC2RecommendationProjectedMetricsCommand"; +import { + GetEffectiveRecommendationPreferencesCommand, + GetEffectiveRecommendationPreferencesCommandInput, + GetEffectiveRecommendationPreferencesCommandOutput, +} from "./commands/GetEffectiveRecommendationPreferencesCommand"; import { GetEnrollmentStatusCommand, GetEnrollmentStatusCommandInput, @@ -60,11 +70,21 @@ import { GetLambdaFunctionRecommendationsCommandInput, GetLambdaFunctionRecommendationsCommandOutput, } from "./commands/GetLambdaFunctionRecommendationsCommand"; +import { + GetRecommendationPreferencesCommand, + GetRecommendationPreferencesCommandInput, + GetRecommendationPreferencesCommandOutput, +} from "./commands/GetRecommendationPreferencesCommand"; import { GetRecommendationSummariesCommand, GetRecommendationSummariesCommandInput, GetRecommendationSummariesCommandOutput, } from "./commands/GetRecommendationSummariesCommand"; +import { + PutRecommendationPreferencesCommand, + 
PutRecommendationPreferencesCommandInput, + PutRecommendationPreferencesCommandOutput, +} from "./commands/PutRecommendationPreferencesCommand"; import { UpdateEnrollmentStatusCommand, UpdateEnrollmentStatusCommandInput, @@ -86,9 +106,43 @@ import { ComputeOptimizerClient } from "./ComputeOptimizerClient"; * service, see the Compute Optimizer User Guide.

*/ export class ComputeOptimizer extends ComputeOptimizerClient { + /** + *

Deletes a recommendation preference, such as enhanced infrastructure metrics.

+ *

For more information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.
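A minimal usage sketch for this new aggregated-client method, using the promise-style overload declared below. The input member names (resourceType, recommendationPreferenceNames, scope) are assumptions and are not taken from this patch:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

async function deleteEnhancedMetricsPreference() {
  // Hypothetical input shape; adjust to the generated DeleteRecommendationPreferencesRequest members.
  const response = await client.deleteRecommendationPreferences({
    resourceType: "Ec2Instance",
    recommendationPreferenceNames: ["EnhancedInfrastructureMetrics"],
    scope: { name: "AccountId", value: "111122223333" },
  });
  console.log(response.$metadata.httpStatusCode);
}
```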

+ */ + public deleteRecommendationPreferences( + args: DeleteRecommendationPreferencesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteRecommendationPreferences( + args: DeleteRecommendationPreferencesCommandInput, + cb: (err: any, data?: DeleteRecommendationPreferencesCommandOutput) => void + ): void; + public deleteRecommendationPreferences( + args: DeleteRecommendationPreferencesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteRecommendationPreferencesCommandOutput) => void + ): void; + public deleteRecommendationPreferences( + args: DeleteRecommendationPreferencesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteRecommendationPreferencesCommandOutput) => void), + cb?: (err: any, data?: DeleteRecommendationPreferencesCommandOutput) => void + ): Promise | void { + const command = new DeleteRecommendationPreferencesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Describes recommendation export jobs created in the last seven days.

- * *

Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your * recommendations. Then use the DescribeRecommendationExportJobs action * to view your export jobs.
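A sketch of the "export, then describe" flow mentioned above. The jobIds filter and the response members shown are assumed names, not confirmed by this diff:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

async function checkExportJob(jobId: string) {
  // jobIds is an assumed request member for filtering to a single export job.
  const result = await client.describeRecommendationExportJobs({ jobIds: [jobId] });
  for (const job of result.recommendationExportJobs ?? []) {
    // status and destination.s3 are assumed response members.
    console.log(job.status, job.destination?.s3?.bucket, job.destination?.s3?.key);
  }
}
```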

@@ -124,12 +178,10 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Exports optimization recommendations for Auto Scaling groups.

- * *

Recommendations are exported in a comma-separated values (.csv) file, and its metadata * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting * Recommendations in the Compute Optimizer User * Guide.

- * *

You can have only one Auto Scaling group export job in progress per Amazon Web Services Region.
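A sketch of starting an Auto Scaling group export to an existing S3 bucket, as described above; the s3DestinationConfig member names and the jobId response field are assumptions. The same pattern applies to the EBS volume, EC2 instance, and Lambda function export methods that follow:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

async function startAsgExport() {
  const { jobId } = await client.exportAutoScalingGroupRecommendations({
    s3DestinationConfig: {
      bucket: "my-compute-optimizer-exports", // the bucket must already exist, per the description above
      keyPrefix: "asg/",
    },
  });
  console.log("Started export job:", jobId);
}
```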

*/ public exportAutoScalingGroupRecommendations( @@ -165,13 +217,10 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Exports optimization recommendations for Amazon EBS volumes.

- * *

Recommendations are exported in a comma-separated values (.csv) file, and its metadata - * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see - * Exporting + * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting * Recommendations in the Compute Optimizer User * Guide.

- * *

You can have only one Amazon EBS volume export job in progress per Amazon Web Services Region.

*/ public exportEBSVolumeRecommendations( @@ -205,13 +254,10 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Exports optimization recommendations for Amazon EC2 instances.

- * *

Recommendations are exported in a comma-separated values (.csv) file, and its metadata - * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see - * Exporting + * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting * Recommendations in the Compute Optimizer User * Guide.

- * *

You can have only one Amazon EC2 instance export job in progress per Amazon Web Services Region.
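Each aggregated-client method exposes promise, callback, and options-plus-callback overloads, as in the signatures in this file. A sketch of the callback form for this operation; the S3 destination field names are assumptions:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

// Callback-style overload; the promise form is `await client.exportEC2InstanceRecommendations(input)`.
client.exportEC2InstanceRecommendations(
  { s3DestinationConfig: { bucket: "my-compute-optimizer-exports", keyPrefix: "ec2/" } },
  (err, data) => {
    if (err) {
      console.error("Export request failed:", err);
      return;
    }
    console.log("Export job id:", data?.jobId);
  }
);
```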

*/ public exportEC2InstanceRecommendations( @@ -245,13 +291,10 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Exports optimization recommendations for Lambda functions.

- * *

Recommendations are exported in a comma-separated values (.csv) file, and its metadata - * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see - * Exporting + * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting * Recommendations in the Compute Optimizer User * Guide.

- * *

You can have only one Lambda function export job in progress per Amazon Web Services Region.

*/ public exportLambdaFunctionRecommendations( @@ -285,7 +328,6 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Returns Auto Scaling group recommendations.

- * *

Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that * meet a specific set of requirements. For more information, see the Supported * resources and requirements in the Compute Optimizer User @@ -322,7 +364,6 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Returns Amazon Elastic Block Store (Amazon EBS) volume recommendations.

- * *

Compute Optimizer generates recommendations for Amazon EBS volumes that * meet a specific set of requirements. For more information, see the Supported * resources and requirements in the Compute Optimizer User @@ -359,7 +400,6 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Returns Amazon EC2 instance recommendations.

- * *

Compute Optimizer generates recommendations for Amazon Elastic Compute Cloud (Amazon EC2) instances that meet a specific set of requirements. For more * information, see the Supported resources and * requirements in the Compute Optimizer User @@ -397,7 +437,6 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Returns the projected utilization metrics of Amazon EC2 instance * recommendations.

- * * *

The Cpu and Memory metrics are the only projected * utilization metrics returned when you run this action. Additionally, the @@ -434,10 +473,48 @@ export class ComputeOptimizer extends ComputeOptimizerClient { } } + /** + *

Returns the recommendation preferences that are in effect for a given resource, such + * as enhanced infrastructure metrics. Considers all applicable preferences that you might + * have set at the resource, account, and organization level.

+ *

When you create a recommendation preference, you can set its status to + * Active or Inactive. Use this action to view the + * recommendation preferences that are in effect, or Active.
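A sketch of checking the preferences in effect for a single resource; resourceArn and the enhancedInfrastructureMetrics response member are assumed names based on the description above:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

async function showEffectivePreferences(instanceArn: string) {
  // resourceArn is an assumed request member; pass the ARN of an EC2 instance.
  const prefs = await client.getEffectiveRecommendationPreferences({ resourceArn: instanceArn });
  console.log("Enhanced infrastructure metrics:", prefs.enhancedInfrastructureMetrics);
}
```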

+ */ + public getEffectiveRecommendationPreferences( + args: GetEffectiveRecommendationPreferencesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getEffectiveRecommendationPreferences( + args: GetEffectiveRecommendationPreferencesCommandInput, + cb: (err: any, data?: GetEffectiveRecommendationPreferencesCommandOutput) => void + ): void; + public getEffectiveRecommendationPreferences( + args: GetEffectiveRecommendationPreferencesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetEffectiveRecommendationPreferencesCommandOutput) => void + ): void; + public getEffectiveRecommendationPreferences( + args: GetEffectiveRecommendationPreferencesCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: GetEffectiveRecommendationPreferencesCommandOutput) => void), + cb?: (err: any, data?: GetEffectiveRecommendationPreferencesCommandOutput) => void + ): Promise | void { + const command = new GetEffectiveRecommendationPreferencesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Returns the enrollment (opt in) status of an account to the Compute Optimizer * service.

- * *

If the account is the management account of an organization, this action also confirms * the enrollment status of member accounts of the organization. Use the GetEnrollmentStatusesForOrganization action to get detailed information * about the enrollment status of member accounts of an organization.
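A sketch of reading the enrollment status before calling other operations; the status and memberAccountsEnrolled response members are assumed names:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

async function ensureEnrolled() {
  const { status, memberAccountsEnrolled } = await client.getEnrollmentStatus({});
  if (status !== "Active") {
    console.warn(`Account is not opted in to Compute Optimizer (status: ${status}).`);
  }
  console.log("Member accounts enrolled:", memberAccountsEnrolled);
}
```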

@@ -474,7 +551,6 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Returns the Compute Optimizer enrollment (opt-in) status of organization member * accounts, if your account is an organization management account.

- * *

To get the enrollment status of standalone accounts, use the GetEnrollmentStatus action.

*/ public getEnrollmentStatusesForOrganization( @@ -508,7 +584,6 @@ export class ComputeOptimizer extends ComputeOptimizerClient { /** *

Returns Lambda function recommendations.

- * *

Compute Optimizer generates recommendations for functions that meet a specific set * of requirements. For more information, see the Supported resources and * requirements in the Compute Optimizer User @@ -543,11 +618,48 @@ export class ComputeOptimizer extends ComputeOptimizerClient { } } + /** + *

Returns existing recommendation preferences, such as enhanced infrastructure + * metrics.

+ *

Use the scope parameter to specify which preferences to return. You can + * specify to return preferences for an organization, a specific account ID, or a specific + * EC2 instance or Auto Scaling group Amazon Resource Name (ARN).

+ *

For more information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.
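A sketch of scoping the query to a single resource ARN, per the scope parameter described above; the resourceType member and the scope/response member names are assumptions:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

async function listPreferencesForResource(resourceArn: string) {
  const { recommendationPreferencesDetails } = await client.getRecommendationPreferences({
    resourceType: "Ec2Instance",
    // Assumed scope shape: a name/value pair selecting Organization, AccountId, or ResourceArn.
    scope: { name: "ResourceArn", value: resourceArn },
  });
  console.log(recommendationPreferencesDetails);
}
```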

+ */ + public getRecommendationPreferences( + args: GetRecommendationPreferencesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getRecommendationPreferences( + args: GetRecommendationPreferencesCommandInput, + cb: (err: any, data?: GetRecommendationPreferencesCommandOutput) => void + ): void; + public getRecommendationPreferences( + args: GetRecommendationPreferencesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetRecommendationPreferencesCommandOutput) => void + ): void; + public getRecommendationPreferences( + args: GetRecommendationPreferencesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetRecommendationPreferencesCommandOutput) => void), + cb?: (err: any, data?: GetRecommendationPreferencesCommandOutput) => void + ): Promise | void { + const command = new GetRecommendationPreferencesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Returns the optimization findings for an account.

- * *

It returns the number of:

- * *
    *
*

    Amazon EC2 instances in an account that are @@ -597,15 +709,48 @@ export class ComputeOptimizer extends ComputeOptimizerClient { } } + /** + *

    Creates a new recommendation preference or updates an existing recommendation + * preference, such as enhanced infrastructure metrics.

    + *

    For more information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.
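A sketch of activating enhanced infrastructure metrics for an account; the request member names (resourceType, scope, enhancedInfrastructureMetrics) are assumptions, not taken from this patch:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

async function activateEnhancedMetrics(accountId: string) {
  await client.putRecommendationPreferences({
    resourceType: "Ec2Instance",
    scope: { name: "AccountId", value: accountId },
    enhancedInfrastructureMetrics: "Active", // assumed enum value; "Inactive" would turn it off
  });
}
```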

    + */ + public putRecommendationPreferences( + args: PutRecommendationPreferencesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public putRecommendationPreferences( + args: PutRecommendationPreferencesCommandInput, + cb: (err: any, data?: PutRecommendationPreferencesCommandOutput) => void + ): void; + public putRecommendationPreferences( + args: PutRecommendationPreferencesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: PutRecommendationPreferencesCommandOutput) => void + ): void; + public putRecommendationPreferences( + args: PutRecommendationPreferencesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: PutRecommendationPreferencesCommandOutput) => void), + cb?: (err: any, data?: PutRecommendationPreferencesCommandOutput) => void + ): Promise | void { + const command = new PutRecommendationPreferencesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

    Updates the enrollment (opt in and opt out) status of an account to the Compute Optimizer service.

    - * *

    If the account is a management account of an organization, this action can also be * used to enroll member accounts of the organization.

    - * *

    You must have the appropriate permissions to opt in to Compute Optimizer, to view its * recommendations, and to opt out. For more information, see Controlling access with Amazon Web Services Identity and Access Management in the Compute Optimizer User Guide.

    - * *

    When you opt in, Compute Optimizer automatically creates a service-linked role in your * account to access its data. For more information, see Using * Service-Linked Roles for Compute Optimizer in the Compute Optimizer User Guide.
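A sketch of opting an account in, including member accounts when called from an organization management account; status, includeMemberAccounts, and statusReason are assumed member names:

```ts
import { ComputeOptimizer } from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizer({ region: "us-east-1" });

async function optIn() {
  const { status, statusReason } = await client.updateEnrollmentStatus({
    status: "Active",
    includeMemberAccounts: true, // only meaningful for an organization management account
  });
  console.log("Enrollment status:", status, statusReason ?? "");
}
```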

    diff --git a/clients/client-compute-optimizer/src/ComputeOptimizerClient.ts b/clients/client-compute-optimizer/src/ComputeOptimizerClient.ts index ec7e99f2fd86..cc00d09e69d7 100644 --- a/clients/client-compute-optimizer/src/ComputeOptimizerClient.ts +++ b/clients/client-compute-optimizer/src/ComputeOptimizerClient.ts @@ -49,6 +49,10 @@ import { UserAgent as __UserAgent, } from "@aws-sdk/types"; +import { + DeleteRecommendationPreferencesCommandInput, + DeleteRecommendationPreferencesCommandOutput, +} from "./commands/DeleteRecommendationPreferencesCommand"; import { DescribeRecommendationExportJobsCommandInput, DescribeRecommendationExportJobsCommandOutput, @@ -85,6 +89,10 @@ import { GetEC2RecommendationProjectedMetricsCommandInput, GetEC2RecommendationProjectedMetricsCommandOutput, } from "./commands/GetEC2RecommendationProjectedMetricsCommand"; +import { + GetEffectiveRecommendationPreferencesCommandInput, + GetEffectiveRecommendationPreferencesCommandOutput, +} from "./commands/GetEffectiveRecommendationPreferencesCommand"; import { GetEnrollmentStatusCommandInput, GetEnrollmentStatusCommandOutput, @@ -97,10 +105,18 @@ import { GetLambdaFunctionRecommendationsCommandInput, GetLambdaFunctionRecommendationsCommandOutput, } from "./commands/GetLambdaFunctionRecommendationsCommand"; +import { + GetRecommendationPreferencesCommandInput, + GetRecommendationPreferencesCommandOutput, +} from "./commands/GetRecommendationPreferencesCommand"; import { GetRecommendationSummariesCommandInput, GetRecommendationSummariesCommandOutput, } from "./commands/GetRecommendationSummariesCommand"; +import { + PutRecommendationPreferencesCommandInput, + PutRecommendationPreferencesCommandOutput, +} from "./commands/PutRecommendationPreferencesCommand"; import { UpdateEnrollmentStatusCommandInput, UpdateEnrollmentStatusCommandOutput, @@ -108,6 +124,7 @@ import { import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = + | DeleteRecommendationPreferencesCommandInput | DescribeRecommendationExportJobsCommandInput | ExportAutoScalingGroupRecommendationsCommandInput | ExportEBSVolumeRecommendationsCommandInput @@ -117,13 +134,17 @@ export type ServiceInputTypes = | GetEBSVolumeRecommendationsCommandInput | GetEC2InstanceRecommendationsCommandInput | GetEC2RecommendationProjectedMetricsCommandInput + | GetEffectiveRecommendationPreferencesCommandInput | GetEnrollmentStatusCommandInput | GetEnrollmentStatusesForOrganizationCommandInput | GetLambdaFunctionRecommendationsCommandInput + | GetRecommendationPreferencesCommandInput | GetRecommendationSummariesCommandInput + | PutRecommendationPreferencesCommandInput | UpdateEnrollmentStatusCommandInput; export type ServiceOutputTypes = + | DeleteRecommendationPreferencesCommandOutput | DescribeRecommendationExportJobsCommandOutput | ExportAutoScalingGroupRecommendationsCommandOutput | ExportEBSVolumeRecommendationsCommandOutput @@ -133,10 +154,13 @@ export type ServiceOutputTypes = | GetEBSVolumeRecommendationsCommandOutput | GetEC2InstanceRecommendationsCommandOutput | GetEC2RecommendationProjectedMetricsCommandOutput + | GetEffectiveRecommendationPreferencesCommandOutput | GetEnrollmentStatusCommandOutput | GetEnrollmentStatusesForOrganizationCommandOutput | GetLambdaFunctionRecommendationsCommandOutput + | GetRecommendationPreferencesCommandOutput | GetRecommendationSummariesCommandOutput + | PutRecommendationPreferencesCommandOutput | UpdateEnrollmentStatusCommandOutput; export interface ClientDefaults extends 
Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { diff --git a/clients/client-compute-optimizer/src/commands/DeleteRecommendationPreferencesCommand.ts b/clients/client-compute-optimizer/src/commands/DeleteRecommendationPreferencesCommand.ts new file mode 100644 index 000000000000..245fb4c174e1 --- /dev/null +++ b/clients/client-compute-optimizer/src/commands/DeleteRecommendationPreferencesCommand.ts @@ -0,0 +1,106 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComputeOptimizerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComputeOptimizerClient"; +import { DeleteRecommendationPreferencesRequest, DeleteRecommendationPreferencesResponse } from "../models/models_0"; +import { + deserializeAws_json1_0DeleteRecommendationPreferencesCommand, + serializeAws_json1_0DeleteRecommendationPreferencesCommand, +} from "../protocols/Aws_json1_0"; + +export interface DeleteRecommendationPreferencesCommandInput extends DeleteRecommendationPreferencesRequest {} +export interface DeleteRecommendationPreferencesCommandOutput + extends DeleteRecommendationPreferencesResponse, + __MetadataBearer {} + +/** + *

    Deletes a recommendation preference, such as enhanced infrastructure metrics.

    + *

    For more information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.
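Beyond the bare-bones example below, callers typically wrap `client.send` in a try/catch. A sketch under the assumption that the thrown error carries the service error code in `error.name`; the specific error names shown are assumptions:

```ts
import {
  ComputeOptimizerClient,
  DeleteRecommendationPreferencesCommand,
  DeleteRecommendationPreferencesCommandInput,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function deletePreference(input: DeleteRecommendationPreferencesCommandInput) {
  try {
    return await client.send(new DeleteRecommendationPreferencesCommand(input));
  } catch (error: any) {
    // Assumed error name; inspect error.name (or error.$metadata) for the actual service code.
    if (error?.name === "ResourceNotFoundException") {
      console.warn("No matching recommendation preference to delete.");
      return undefined;
    }
    throw error;
  }
}
```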

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComputeOptimizerClient, DeleteRecommendationPreferencesCommand } from "@aws-sdk/client-compute-optimizer"; // ES Modules import + * // const { ComputeOptimizerClient, DeleteRecommendationPreferencesCommand } = require("@aws-sdk/client-compute-optimizer"); // CommonJS import + * const client = new ComputeOptimizerClient(config); + * const command = new DeleteRecommendationPreferencesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteRecommendationPreferencesCommandInput} for command's `input` shape. + * @see {@link DeleteRecommendationPreferencesCommandOutput} for command's `response` shape. + * @see {@link ComputeOptimizerClientResolvedConfig | config} for ComputeOptimizerClient's `config` shape. + * + */ +export class DeleteRecommendationPreferencesCommand extends $Command< + DeleteRecommendationPreferencesCommandInput, + DeleteRecommendationPreferencesCommandOutput, + ComputeOptimizerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteRecommendationPreferencesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ComputeOptimizerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ComputeOptimizerClient"; + const commandName = "DeleteRecommendationPreferencesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteRecommendationPreferencesRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteRecommendationPreferencesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DeleteRecommendationPreferencesCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_0DeleteRecommendationPreferencesCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_0DeleteRecommendationPreferencesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-compute-optimizer/src/commands/DescribeRecommendationExportJobsCommand.ts b/clients/client-compute-optimizer/src/commands/DescribeRecommendationExportJobsCommand.ts index 847dbef1831a..dad2a1714849 100644 --- a/clients/client-compute-optimizer/src/commands/DescribeRecommendationExportJobsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/DescribeRecommendationExportJobsCommand.ts @@ -25,7 +25,6 @@ export interface DescribeRecommendationExportJobsCommandOutput /** *

    Describes recommendation export jobs created in the last seven days.

    - * *

    Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your * recommendations. Then use the DescribeRecommendationExportJobs action * to view your export jobs.

    diff --git a/clients/client-compute-optimizer/src/commands/ExportAutoScalingGroupRecommendationsCommand.ts b/clients/client-compute-optimizer/src/commands/ExportAutoScalingGroupRecommendationsCommand.ts index afd3f81313a1..8097d1f66d13 100644 --- a/clients/client-compute-optimizer/src/commands/ExportAutoScalingGroupRecommendationsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/ExportAutoScalingGroupRecommendationsCommand.ts @@ -29,12 +29,10 @@ export interface ExportAutoScalingGroupRecommendationsCommandOutput /** *

    Exports optimization recommendations for Auto Scaling groups.

    - * *

    Recommendations are exported in a comma-separated values (.csv) file, and its metadata * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting * Recommendations in the Compute Optimizer User * Guide.

    - * *

    You can have only one Auto Scaling group export job in progress per Amazon Web Services Region.

    * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-compute-optimizer/src/commands/ExportEBSVolumeRecommendationsCommand.ts b/clients/client-compute-optimizer/src/commands/ExportEBSVolumeRecommendationsCommand.ts index 3cc99fcbae3d..a11970160cbe 100644 --- a/clients/client-compute-optimizer/src/commands/ExportEBSVolumeRecommendationsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/ExportEBSVolumeRecommendationsCommand.ts @@ -25,13 +25,10 @@ export interface ExportEBSVolumeRecommendationsCommandOutput /** *

    Exports optimization recommendations for Amazon EBS volumes.

    - * *

    Recommendations are exported in a comma-separated values (.csv) file, and its metadata - * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see - * Exporting + * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting * Recommendations in the Compute Optimizer User * Guide.

    - * *

    You can have only one Amazon EBS volume export job in progress per Amazon Web Services Region.

    * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-compute-optimizer/src/commands/ExportEC2InstanceRecommendationsCommand.ts b/clients/client-compute-optimizer/src/commands/ExportEC2InstanceRecommendationsCommand.ts index 59fd7ab7739c..99ad64527b0e 100644 --- a/clients/client-compute-optimizer/src/commands/ExportEC2InstanceRecommendationsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/ExportEC2InstanceRecommendationsCommand.ts @@ -25,13 +25,10 @@ export interface ExportEC2InstanceRecommendationsCommandOutput /** *

    Exports optimization recommendations for Amazon EC2 instances.

    - * *

    Recommendations are exported in a comma-separated values (.csv) file, and its metadata - * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see - * Exporting + * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting * Recommendations in the Compute Optimizer User * Guide.

    - * *

    You can have only one Amazon EC2 instance export job in progress per Amazon Web Services Region.

    * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-compute-optimizer/src/commands/ExportLambdaFunctionRecommendationsCommand.ts b/clients/client-compute-optimizer/src/commands/ExportLambdaFunctionRecommendationsCommand.ts index ef5760b02f2b..ec905e965d22 100644 --- a/clients/client-compute-optimizer/src/commands/ExportLambdaFunctionRecommendationsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/ExportLambdaFunctionRecommendationsCommand.ts @@ -28,13 +28,10 @@ export interface ExportLambdaFunctionRecommendationsCommandOutput /** *

    Exports optimization recommendations for Lambda functions.

    - * *

    Recommendations are exported in a comma-separated values (.csv) file, and its metadata - * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see - * Exporting + * in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting * Recommendations in the Compute Optimizer User * Guide.

    - * *

    You can have only one Lambda function export job in progress per Amazon Web Services Region.

    * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-compute-optimizer/src/commands/GetAutoScalingGroupRecommendationsCommand.ts b/clients/client-compute-optimizer/src/commands/GetAutoScalingGroupRecommendationsCommand.ts index 1ee7f342ab24..840a942a54c3 100644 --- a/clients/client-compute-optimizer/src/commands/GetAutoScalingGroupRecommendationsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/GetAutoScalingGroupRecommendationsCommand.ts @@ -28,7 +28,6 @@ export interface GetAutoScalingGroupRecommendationsCommandOutput /** *

    Returns Auto Scaling group recommendations.

    - * *

    Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that * meet a specific set of requirements. For more information, see the Supported * resources and requirements in the Compute Optimizer User diff --git a/clients/client-compute-optimizer/src/commands/GetEBSVolumeRecommendationsCommand.ts b/clients/client-compute-optimizer/src/commands/GetEBSVolumeRecommendationsCommand.ts index 9e3b6ee02abd..dce6dd6ad9a2 100644 --- a/clients/client-compute-optimizer/src/commands/GetEBSVolumeRecommendationsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/GetEBSVolumeRecommendationsCommand.ts @@ -25,7 +25,6 @@ export interface GetEBSVolumeRecommendationsCommandOutput /** *

    Returns Amazon Elastic Block Store (Amazon EBS) volume recommendations.

    - * *

    Compute Optimizer generates recommendations for Amazon EBS volumes that * meet a specific set of requirements. For more information, see the Supported * resources and requirements in the Compute Optimizer User diff --git a/clients/client-compute-optimizer/src/commands/GetEC2InstanceRecommendationsCommand.ts b/clients/client-compute-optimizer/src/commands/GetEC2InstanceRecommendationsCommand.ts index e935f7060260..0dcbe8901ee4 100644 --- a/clients/client-compute-optimizer/src/commands/GetEC2InstanceRecommendationsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/GetEC2InstanceRecommendationsCommand.ts @@ -25,7 +25,6 @@ export interface GetEC2InstanceRecommendationsCommandOutput /** *

    Returns Amazon EC2 instance recommendations.

    - * *

    Compute Optimizer generates recommendations for Amazon Elastic Compute Cloud (Amazon EC2) instances that meet a specific set of requirements. For more * information, see the Supported resources and * requirements in the Compute Optimizer User diff --git a/clients/client-compute-optimizer/src/commands/GetEC2RecommendationProjectedMetricsCommand.ts b/clients/client-compute-optimizer/src/commands/GetEC2RecommendationProjectedMetricsCommand.ts index 47e36918172c..e0dfc36103c8 100644 --- a/clients/client-compute-optimizer/src/commands/GetEC2RecommendationProjectedMetricsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/GetEC2RecommendationProjectedMetricsCommand.ts @@ -29,7 +29,6 @@ export interface GetEC2RecommendationProjectedMetricsCommandOutput /** *

    Returns the projected utilization metrics of Amazon EC2 instance * recommendations.

    - * * *

    The Cpu and Memory metrics are the only projected * utilization metrics returned when you run this action. Additionally, the diff --git a/clients/client-compute-optimizer/src/commands/GetEffectiveRecommendationPreferencesCommand.ts b/clients/client-compute-optimizer/src/commands/GetEffectiveRecommendationPreferencesCommand.ts new file mode 100644 index 000000000000..ccd3c09ce05d --- /dev/null +++ b/clients/client-compute-optimizer/src/commands/GetEffectiveRecommendationPreferencesCommand.ts @@ -0,0 +1,112 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComputeOptimizerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComputeOptimizerClient"; +import { + GetEffectiveRecommendationPreferencesRequest, + GetEffectiveRecommendationPreferencesResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_0GetEffectiveRecommendationPreferencesCommand, + serializeAws_json1_0GetEffectiveRecommendationPreferencesCommand, +} from "../protocols/Aws_json1_0"; + +export interface GetEffectiveRecommendationPreferencesCommandInput + extends GetEffectiveRecommendationPreferencesRequest {} +export interface GetEffectiveRecommendationPreferencesCommandOutput + extends GetEffectiveRecommendationPreferencesResponse, + __MetadataBearer {} + +/** + *

    Returns the recommendation preferences that are in effect for a given resource, such + * as enhanced infrastructure metrics. Considers all applicable preferences that you might + * have set at the resource, account, and organization level.

    + *

    When you create a recommendation preference, you can set its status to + * Active or Inactive. Use this action to view the + * recommendation preferences that are in effect, or Active.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComputeOptimizerClient, GetEffectiveRecommendationPreferencesCommand } from "@aws-sdk/client-compute-optimizer"; // ES Modules import + * // const { ComputeOptimizerClient, GetEffectiveRecommendationPreferencesCommand } = require("@aws-sdk/client-compute-optimizer"); // CommonJS import + * const client = new ComputeOptimizerClient(config); + * const command = new GetEffectiveRecommendationPreferencesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetEffectiveRecommendationPreferencesCommandInput} for command's `input` shape. + * @see {@link GetEffectiveRecommendationPreferencesCommandOutput} for command's `response` shape. + * @see {@link ComputeOptimizerClientResolvedConfig | config} for ComputeOptimizerClient's `config` shape. + * + */ +export class GetEffectiveRecommendationPreferencesCommand extends $Command< + GetEffectiveRecommendationPreferencesCommandInput, + GetEffectiveRecommendationPreferencesCommandOutput, + ComputeOptimizerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetEffectiveRecommendationPreferencesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ComputeOptimizerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ComputeOptimizerClient"; + const commandName = "GetEffectiveRecommendationPreferencesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetEffectiveRecommendationPreferencesRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetEffectiveRecommendationPreferencesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: GetEffectiveRecommendationPreferencesCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_0GetEffectiveRecommendationPreferencesCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_0GetEffectiveRecommendationPreferencesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-compute-optimizer/src/commands/GetEnrollmentStatusCommand.ts b/clients/client-compute-optimizer/src/commands/GetEnrollmentStatusCommand.ts index fa1270c7fa3c..043ef0ee6e42 100644 --- a/clients/client-compute-optimizer/src/commands/GetEnrollmentStatusCommand.ts +++ b/clients/client-compute-optimizer/src/commands/GetEnrollmentStatusCommand.ts @@ -24,7 +24,6 @@ export interface GetEnrollmentStatusCommandOutput extends GetEnrollmentStatusRes /** *

    Returns the enrollment (opt in) status of an account to the Compute Optimizer * service.

    - * *

    If the account is the management account of an organization, this action also confirms * the enrollment status of member accounts of the organization. Use the GetEnrollmentStatusesForOrganization action to get detailed information * about the enrollment status of member accounts of an organization.

    diff --git a/clients/client-compute-optimizer/src/commands/GetEnrollmentStatusesForOrganizationCommand.ts b/clients/client-compute-optimizer/src/commands/GetEnrollmentStatusesForOrganizationCommand.ts index b39350b3b9fe..b7ee1d5947db 100644 --- a/clients/client-compute-optimizer/src/commands/GetEnrollmentStatusesForOrganizationCommand.ts +++ b/clients/client-compute-optimizer/src/commands/GetEnrollmentStatusesForOrganizationCommand.ts @@ -29,7 +29,6 @@ export interface GetEnrollmentStatusesForOrganizationCommandOutput /** *

    Returns the Compute Optimizer enrollment (opt-in) status of organization member * accounts, if your account is an organization management account.

    - * *

    To get the enrollment status of standalone accounts, use the GetEnrollmentStatus action.

    * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-compute-optimizer/src/commands/GetLambdaFunctionRecommendationsCommand.ts b/clients/client-compute-optimizer/src/commands/GetLambdaFunctionRecommendationsCommand.ts index b8ed0920ca3c..b797ff47b45a 100644 --- a/clients/client-compute-optimizer/src/commands/GetLambdaFunctionRecommendationsCommand.ts +++ b/clients/client-compute-optimizer/src/commands/GetLambdaFunctionRecommendationsCommand.ts @@ -25,7 +25,6 @@ export interface GetLambdaFunctionRecommendationsCommandOutput /** *

    Returns Lambda function recommendations.

    - * *

    Compute Optimizer generates recommendations for functions that meet a specific set * of requirements. For more information, see the Supported resources and * requirements in the Compute Optimizer User diff --git a/clients/client-compute-optimizer/src/commands/GetRecommendationPreferencesCommand.ts b/clients/client-compute-optimizer/src/commands/GetRecommendationPreferencesCommand.ts new file mode 100644 index 000000000000..9785d2d4647f --- /dev/null +++ b/clients/client-compute-optimizer/src/commands/GetRecommendationPreferencesCommand.ts @@ -0,0 +1,107 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComputeOptimizerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComputeOptimizerClient"; +import { GetRecommendationPreferencesRequest, GetRecommendationPreferencesResponse } from "../models/models_0"; +import { + deserializeAws_json1_0GetRecommendationPreferencesCommand, + serializeAws_json1_0GetRecommendationPreferencesCommand, +} from "../protocols/Aws_json1_0"; + +export interface GetRecommendationPreferencesCommandInput extends GetRecommendationPreferencesRequest {} +export interface GetRecommendationPreferencesCommandOutput + extends GetRecommendationPreferencesResponse, + __MetadataBearer {} + +/** + *

    Returns existing recommendation preferences, such as enhanced infrastructure + * metrics.

    + *

    Use the scope parameter to specify which preferences to return. You can + * specify to return preferences for an organization, a specific account ID, or a specific + * EC2 instance or Auto Scaling group Amazon Resource Name (ARN).

    + *

    For more information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.

    + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComputeOptimizerClient, GetRecommendationPreferencesCommand } from "@aws-sdk/client-compute-optimizer"; // ES Modules import + * // const { ComputeOptimizerClient, GetRecommendationPreferencesCommand } = require("@aws-sdk/client-compute-optimizer"); // CommonJS import + * const client = new ComputeOptimizerClient(config); + * const command = new GetRecommendationPreferencesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetRecommendationPreferencesCommandInput} for command's `input` shape. + * @see {@link GetRecommendationPreferencesCommandOutput} for command's `response` shape. + * @see {@link ComputeOptimizerClientResolvedConfig | config} for ComputeOptimizerClient's `config` shape. + * + */ +export class GetRecommendationPreferencesCommand extends $Command< + GetRecommendationPreferencesCommandInput, + GetRecommendationPreferencesCommandOutput, + ComputeOptimizerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetRecommendationPreferencesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ComputeOptimizerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ComputeOptimizerClient"; + const commandName = "GetRecommendationPreferencesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetRecommendationPreferencesRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetRecommendationPreferencesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetRecommendationPreferencesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0GetRecommendationPreferencesCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_0GetRecommendationPreferencesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-compute-optimizer/src/commands/GetRecommendationSummariesCommand.ts b/clients/client-compute-optimizer/src/commands/GetRecommendationSummariesCommand.ts index d017ca205e4f..a09e731e293a 100644 --- a/clients/client-compute-optimizer/src/commands/GetRecommendationSummariesCommand.ts +++ b/clients/client-compute-optimizer/src/commands/GetRecommendationSummariesCommand.ts @@ -23,9 +23,7 @@ export interface GetRecommendationSummariesCommandOutput extends GetRecommendati /** *

    Returns the optimization findings for an account.

    - * *

    It returns the number of:

    - * *
      *
    • *

      Amazon EC2 instances in an account that are diff --git a/clients/client-compute-optimizer/src/commands/PutRecommendationPreferencesCommand.ts b/clients/client-compute-optimizer/src/commands/PutRecommendationPreferencesCommand.ts new file mode 100644 index 000000000000..7afdd3e26709 --- /dev/null +++ b/clients/client-compute-optimizer/src/commands/PutRecommendationPreferencesCommand.ts @@ -0,0 +1,104 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ComputeOptimizerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ComputeOptimizerClient"; +import { PutRecommendationPreferencesRequest, PutRecommendationPreferencesResponse } from "../models/models_0"; +import { + deserializeAws_json1_0PutRecommendationPreferencesCommand, + serializeAws_json1_0PutRecommendationPreferencesCommand, +} from "../protocols/Aws_json1_0"; + +export interface PutRecommendationPreferencesCommandInput extends PutRecommendationPreferencesRequest {} +export interface PutRecommendationPreferencesCommandOutput + extends PutRecommendationPreferencesResponse, + __MetadataBearer {} + +/** + *

      Creates a new recommendation preference or updates an existing recommendation + * preference, such as enhanced infrastructure metrics.

      + *

      For more information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.
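A hedged sketch of activating the enhanced infrastructure metrics preference at the account level follows; the resourceType, scope, and enhancedInfrastructureMetrics request fields are assumed from the service API and are not shown in this hunk.

```ts
// Sketch only: activate enhanced infrastructure metrics for all EC2 instances
// in one account. Request field names and the account ID are assumptions.
import {
  ComputeOptimizerClient,
  PutRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

await client.send(
  new PutRecommendationPreferencesCommand({
    resourceType: "Ec2Instance",
    scope: { name: "AccountId", value: "111122223333" },
    enhancedInfrastructureMetrics: "Active",
  })
);
```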

      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ComputeOptimizerClient, PutRecommendationPreferencesCommand } from "@aws-sdk/client-compute-optimizer"; // ES Modules import + * // const { ComputeOptimizerClient, PutRecommendationPreferencesCommand } = require("@aws-sdk/client-compute-optimizer"); // CommonJS import + * const client = new ComputeOptimizerClient(config); + * const command = new PutRecommendationPreferencesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link PutRecommendationPreferencesCommandInput} for command's `input` shape. + * @see {@link PutRecommendationPreferencesCommandOutput} for command's `response` shape. + * @see {@link ComputeOptimizerClientResolvedConfig | config} for ComputeOptimizerClient's `config` shape. + * + */ +export class PutRecommendationPreferencesCommand extends $Command< + PutRecommendationPreferencesCommandInput, + PutRecommendationPreferencesCommandOutput, + ComputeOptimizerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: PutRecommendationPreferencesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ComputeOptimizerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ComputeOptimizerClient"; + const commandName = "PutRecommendationPreferencesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: PutRecommendationPreferencesRequest.filterSensitiveLog, + outputFilterSensitiveLog: PutRecommendationPreferencesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: PutRecommendationPreferencesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_0PutRecommendationPreferencesCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_0PutRecommendationPreferencesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-compute-optimizer/src/commands/UpdateEnrollmentStatusCommand.ts b/clients/client-compute-optimizer/src/commands/UpdateEnrollmentStatusCommand.ts index 3892d787571a..3c52b13926f0 100644 --- a/clients/client-compute-optimizer/src/commands/UpdateEnrollmentStatusCommand.ts +++ b/clients/client-compute-optimizer/src/commands/UpdateEnrollmentStatusCommand.ts @@ -23,13 +23,10 @@ export interface UpdateEnrollmentStatusCommandOutput extends UpdateEnrollmentSta /** *

      Updates the enrollment (opt in and opt out) status of an account to the Compute Optimizer service.

      - * *

      If the account is a management account of an organization, this action can also be * used to enroll member accounts of the organization.

      - * *

      You must have the appropriate permissions to opt in to Compute Optimizer, to view its * recommendations, and to opt out. For more information, see Controlling access with Amazon Web Services Identity and Access Management in the Compute Optimizer User Guide.

      - * *

      When you opt in, Compute Optimizer automatically creates a service-linked role in your * account to access its data. For more information, see Using * Service-Linked Roles for Compute Optimizer in the Compute Optimizer User Guide.
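As a rough illustration of the opt-in flow described above, the sketch below enrolls the calling account and, if it is a management account, its member accounts. The status and includeMemberAccounts request fields and the status response field are assumptions, since the request/response shapes are not shown in this hunk.

```ts
// Sketch only: opt the calling account (and member accounts) in to Compute Optimizer.
// status/includeMemberAccounts field names are assumptions.
import {
  ComputeOptimizerClient,
  UpdateEnrollmentStatusCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

const { status } = await client.send(
  new UpdateEnrollmentStatusCommand({
    status: "Active",
    includeMemberAccounts: true,
  })
);
console.log(status); // e.g. "Pending" while enrollment completes
```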

      diff --git a/clients/client-compute-optimizer/src/commands/index.ts b/clients/client-compute-optimizer/src/commands/index.ts index a1755ad5d529..e53efc4da422 100644 --- a/clients/client-compute-optimizer/src/commands/index.ts +++ b/clients/client-compute-optimizer/src/commands/index.ts @@ -1,3 +1,4 @@ +export * from "./DeleteRecommendationPreferencesCommand"; export * from "./DescribeRecommendationExportJobsCommand"; export * from "./ExportAutoScalingGroupRecommendationsCommand"; export * from "./ExportEBSVolumeRecommendationsCommand"; @@ -7,8 +8,11 @@ export * from "./GetAutoScalingGroupRecommendationsCommand"; export * from "./GetEBSVolumeRecommendationsCommand"; export * from "./GetEC2InstanceRecommendationsCommand"; export * from "./GetEC2RecommendationProjectedMetricsCommand"; +export * from "./GetEffectiveRecommendationPreferencesCommand"; export * from "./GetEnrollmentStatusCommand"; export * from "./GetEnrollmentStatusesForOrganizationCommand"; export * from "./GetLambdaFunctionRecommendationsCommand"; +export * from "./GetRecommendationPreferencesCommand"; export * from "./GetRecommendationSummariesCommand"; +export * from "./PutRecommendationPreferencesCommand"; export * from "./UpdateEnrollmentStatusCommand"; diff --git a/clients/client-compute-optimizer/src/models/models_0.ts b/clients/client-compute-optimizer/src/models/models_0.ts index 211c72c52804..b5858a376cd5 100644 --- a/clients/client-compute-optimizer/src/models/models_0.ts +++ b/clients/client-compute-optimizer/src/models/models_0.ts @@ -41,7 +41,6 @@ export interface AccountEnrollmentStatus { /** *

      The reason for the account enrollment status.

      - * *

      For example, an account might show a status of Pending because member * accounts of an organization require more time to be enrolled in the service.

      */ @@ -99,6 +98,67 @@ export namespace AutoScalingGroupConfiguration { }); } +export enum CurrentPerformanceRisk { + HIGH = "High", + LOW = "Low", + MEDIUM = "Medium", + VERY_LOW = "VeryLow", +} + +export enum CpuVendorArchitecture { + AWS_ARM64 = "AWS_ARM64", + CURRENT = "CURRENT", +} + +export enum EnhancedInfrastructureMetrics { + ACTIVE = "Active", + INACTIVE = "Inactive", +} + +/** + *

      Describes the effective recommendation preferences for a resource.

      + */ +export interface EffectiveRecommendationPreferences { + /** + *

Describes the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group + * recommendations.

      + *

      For example, when you specify AWS_ARM64 with:

      + * + */ + cpuVendorArchitectures?: (CpuVendorArchitecture | string)[]; + + /** + *

      Describes the activation status of the enhanced infrastructure metrics + * preference.

      + *

      A status of Active confirms that the preference is applied in the latest + * recommendation refresh, and a status of Inactive confirms that it's not yet + * applied.
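A hedged way to check this status for a single resource is the GetEffectiveRecommendationPreferencesCommand added in this change; the resourceArn request field and the enhancedInfrastructureMetrics response field used below are assumptions, since that command's shapes are not shown in this hunk.

```ts
// Sketch only: check whether enhanced infrastructure metrics is effectively
// Active for one resource. resourceArn and the response field are assumptions.
import {
  ComputeOptimizerClient,
  GetEffectiveRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

const prefs = await client.send(
  new GetEffectiveRecommendationPreferencesCommand({
    resourceArn: "arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0",
  })
);

if (prefs.enhancedInfrastructureMetrics === "Active") {
  console.log("Preference applied in the latest recommendation refresh");
} else {
  console.log("Preference not yet applied (Inactive or unset)");
}
```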

      + */ + enhancedInfrastructureMetrics?: EnhancedInfrastructureMetrics | string; +} + +export namespace EffectiveRecommendationPreferences { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EffectiveRecommendationPreferences): any => ({ + ...obj, + }); +} + export enum Finding { NOT_OPTIMIZED = "NotOptimized", OPTIMIZED = "Optimized", @@ -131,7 +191,6 @@ export enum MetricStatistic { /** *

      Describes a utilization metric of a resource, such as an Amazon EC2 * instance.

      - * *

      Compare the utilization metric data of your resource against its projected utilization * metric data to determine the performance difference between your current resource and * the recommended option.

      @@ -139,7 +198,6 @@ export enum MetricStatistic { export interface UtilizationMetric { /** *

      The name of the utilization metric.

      - * *

      The following utilization metrics are available:

      *
        *
      • @@ -253,12 +311,9 @@ export interface UtilizationMetric { /** *

        The statistic of the utilization metric.

        - * *

        The Compute Optimizer API, Command Line Interface (CLI), and SDKs * return utilization metrics using only the Maximum statistic, which is the * highest value observed during the specified period.

        - * - * *

        The Compute Optimizer console displays graphs for some utilization metrics using the * Average statistic, which is the value of Sum / * SampleCount during the specified period. For more information, see @@ -285,6 +340,79 @@ export namespace UtilizationMetric { }); } +export enum Currency { + CNY = "CNY", + USD = "USD", +} + +/** + *

Describes the estimated monthly savings amount possible for a given resource based on + * On-Demand instance pricing.

        + *

        For more information, see Estimated monthly savings and savings opportunities in the + * Compute Optimizer User Guide.

        + */ +export interface EstimatedMonthlySavings { + /** + *

        The currency of the estimated monthly + * savings.

        + */ + currency?: Currency | string; + + /** + *

        The value of the estimated monthly savings.

        + */ + value?: number; +} + +export namespace EstimatedMonthlySavings { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EstimatedMonthlySavings): any => ({ + ...obj, + }); +} + +/** + *

        Describes the savings opportunity for recommendations of a given resource type or for + * the recommendation option of an individual resource.

        + *

        Savings opportunity represents the estimated monthly savings you can achieve by + * implementing a given Compute Optimizer recommendation.

        + * + *

        Savings opportunity data requires that you opt in to Cost Explorer, as well as + * activate Receive Amazon EC2 resource + * recommendations in the Cost Explorer preferences page. That + * creates a connection between Cost Explorer and Compute Optimizer. With this + * connection, Cost Explorer generates savings estimates considering the price of + * existing resources, the price of recommended resources, and historical usage data. + * Estimated monthly savings reflects the projected dollar savings associated with each + * of the recommendations generated. For more information, see Enabling Cost Explorer and Optimizing your cost + * with Rightsizing Recommendations in the Cost Management User + * Guide.

        + *
        + */ +export interface SavingsOpportunity { + /** + *

        The estimated monthly savings possible as a percentage of monthly cost.

        + */ + savingsOpportunityPercentage?: number; + + /** + *

        An object that describes the estimated monthly savings amount possible based on + * On-Demand instance pricing.

        + */ + estimatedMonthlySavings?: EstimatedMonthlySavings; +} + +export namespace SavingsOpportunity { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SavingsOpportunity): any => ({ + ...obj, + }); +} + /** *

        Describes a recommendation option for an Auto Scaling group.

        */ @@ -296,7 +424,6 @@ export interface AutoScalingGroupRecommendationOption { /** *

        An array of objects that describe the projected utilization metrics of the Auto Scaling group recommendation option.

        - * * *

        The Cpu and Memory metrics are the only projected * utilization metrics returned. Additionally, the Memory metric is @@ -310,7 +437,6 @@ export interface AutoScalingGroupRecommendationOption { /** *

        The performance risk of the Auto Scaling group configuration * recommendation.

        - * *

        Performance risk indicates the likelihood of the recommended instance type not meeting * the resource needs of your workload. Compute Optimizer calculates an individual * performance risk score for each specification of the recommended instance, including @@ -319,7 +445,6 @@ export interface AutoScalingGroupRecommendationOption { * The performance * risk of the recommended instance is calculated as the maximum performance risk score * across the analyzed resource specifications.

        - * *

        The value ranges from 0 - 4, with 0 meaning * that the recommended resource is predicted to always provide enough hardware capability. * The higher the performance risk is, the more likely you should validate whether the @@ -330,10 +455,16 @@ export interface AutoScalingGroupRecommendationOption { /** *

        The rank of the Auto Scaling group recommendation option.

        - * *

        The top recommendation option is ranked as 1.

        */ rank?: number; + + /** + *

        An object that describes the savings opportunity for the Auto Scaling group + * recommendation option. Savings opportunity includes the estimated monthly savings amount + * and percentage.
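A minimal sketch of reading this new field follows. The savingsOpportunity, savingsOpportunityPercentage, and estimatedMonthlySavings names come from the shapes defined above; the autoScalingGroupRecommendations and autoScalingGroupArn response field names are assumptions not shown in this hunk.

```ts
// Sketch only: print the savings opportunity attached to each Auto Scaling
// group recommendation option. Response list/ARN field names are assumptions.
import {
  ComputeOptimizerClient,
  GetAutoScalingGroupRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });
const response = await client.send(new GetAutoScalingGroupRecommendationsCommand({}));

for (const recommendation of response.autoScalingGroupRecommendations ?? []) {
  for (const option of recommendation.recommendationOptions ?? []) {
    const savings = option.savingsOpportunity;
    console.log(
      recommendation.autoScalingGroupArn,
      savings?.savingsOpportunityPercentage, // percentage of monthly cost
      savings?.estimatedMonthlySavings?.currency,
      savings?.estimatedMonthlySavings?.value
    );
  }
}
```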

        + */ + savingsOpportunity?: SavingsOpportunity; } export namespace AutoScalingGroupRecommendationOption { @@ -366,9 +497,7 @@ export interface AutoScalingGroupRecommendation { /** *

        The finding classification of the Auto Scaling group.

        - * *

        Findings for Auto Scaling groups include:

        - * *
          *
        • *

          @@ -415,10 +544,22 @@ export interface AutoScalingGroupRecommendation { recommendationOptions?: AutoScalingGroupRecommendationOption[]; /** - *

          The timestamp of when the Auto Scaling group recommendation was last - * refreshed.

          + *

          The timestamp of when the Auto Scaling group recommendation was last + * generated.

          */ lastRefreshTimestamp?: Date; + + /** + *

          The risk of the current Auto Scaling group not meeting the performance needs of + * its workloads. The higher the risk, the more likely the current Auto Scaling group + * configuration has insufficient capacity and cannot meet workload requirements.

          + */ + currentPerformanceRisk?: CurrentPerformanceRisk | string; + + /** + *

          An object that describes the effective recommendation preferences for the Auto Scaling group.

          + */ + effectiveRecommendationPreferences?: EffectiveRecommendationPreferences; } export namespace AutoScalingGroupRecommendation { @@ -430,6 +571,273 @@ export namespace AutoScalingGroupRecommendation { }); } +export enum RecommendationPreferenceName { + ENHANCED_INFRASTRUCTURE_METRICS = "EnhancedInfrastructureMetrics", +} + +export enum ResourceType { + AUTO_SCALING_GROUP = "AutoScalingGroup", + EBS_VOLUME = "EbsVolume", + EC2_INSTANCE = "Ec2Instance", + LAMBDA_FUNCTION = "LambdaFunction", +} + +export enum ScopeName { + ACCOUNT_ID = "AccountId", + ORGANIZATION = "Organization", + RESOURCE_ARN = "ResourceArn", +} + +/** + *

          Describes the scope of a recommendation preference.

          + *

          Recommendation preferences can be created at the organization level (for management + * accounts of an organization only), account level, and resource level. For more + * information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.

          + * + *

You cannot create recommendation preferences for Auto Scaling groups at the + * organization and account levels. You can create recommendation preferences for + * Auto Scaling groups only at the resource level by specifying a scope name + * of ResourceArn and a scope value of the Auto Scaling group Amazon + * Resource Name (ARN). This will configure the preference for all instances that are + * part of the specified Auto Scaling group.

          + *
          + */ +export interface Scope { + /** + *

          The name of the scope.

          + *

          The following scopes are possible:

          + *
            + *
          • + *

            + * Organization - Specifies that the recommendation preference + * applies at the organization level, for all member accounts of an + * organization.

            + *
          • + *
          • + *

            + * AccountId - Specifies that the recommendation preference applies + * at the account level, for all resources of a given resource type in an + * account.

            + *
          • + *
          • + *

            + * ResourceArn - Specifies that the recommendation preference + * applies at the individual resource level.

            + *
          • + *
          + */ + name?: ScopeName | string; + + /** + *

          The value of the scope.

          + *

          If you specified the name of the scope as:

          + *
            + *
          • + *

            + * Organization - The value must be + * ALL_ACCOUNTS.

            + *
          • + *
          • + *

            + * AccountId - The value must be a 12-digit Amazon Web Services account ID.

            + *
          • + *
          • + *

            + * ResourceArn - The value must be the Amazon Resource + * Name (ARN) of an EC2 instance or an Auto Scaling group.

            + *
          • + *
          + *

          Only EC2 instance and Auto Scaling group ARNs are currently supported.

          + */ + value?: string; +} + +export namespace Scope { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Scope): any => ({ + ...obj, + }); +} + +export interface DeleteRecommendationPreferencesRequest { + /** + *

          The target resource type of the recommendation preference to delete.

          + *

          The Ec2Instance option encompasses standalone instances and instances + * that are part of Auto Scaling groups. The AutoScalingGroup option + * encompasses only instances that are part of an Auto Scaling group.

          + */ + resourceType: ResourceType | string | undefined; + + /** + *

          An object that describes the scope of the recommendation preference to delete.

          + *

          You can delete recommendation preferences that are created at the organization level + * (for management accounts of an organization only), account level, and resource level. + * For more information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.

          + */ + scope?: Scope; + + /** + *

          The name of the recommendation preference to delete.

          + *

          Enhanced infrastructure metrics (EnhancedInfrastructureMetrics) is the + * only feature that can be activated through preferences. Therefore, it is also the only + * recommendation preference that can be deleted.
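Pulling the Scope and DeleteRecommendationPreferencesRequest shapes above together, a minimal sketch of deleting an account-level preference looks like this; the account ID and region are placeholders.

```ts
// Sketch only: delete the EnhancedInfrastructureMetrics preference that was
// created at the account level. Account ID and region are placeholders.
import {
  ComputeOptimizerClient,
  DeleteRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

await client.send(
  new DeleteRecommendationPreferencesCommand({
    resourceType: "Ec2Instance",
    scope: { name: "AccountId", value: "111122223333" },
    recommendationPreferenceNames: ["EnhancedInfrastructureMetrics"],
  })
);
```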

          + */ + recommendationPreferenceNames: (RecommendationPreferenceName | string)[] | undefined; +} + +export namespace DeleteRecommendationPreferencesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteRecommendationPreferencesRequest): any => ({ + ...obj, + }); +} + +export interface DeleteRecommendationPreferencesResponse {} + +export namespace DeleteRecommendationPreferencesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteRecommendationPreferencesResponse): any => ({ + ...obj, + }); +} + +/** + *

          An internal error has occurred. Try your call again.

          + */ +export interface InternalServerException extends __SmithyException, $MetadataBearer { + name: "InternalServerException"; + $fault: "server"; + message?: string; +} + +export namespace InternalServerException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InternalServerException): any => ({ + ...obj, + }); +} + +/** + *

          The value supplied for the input parameter is out of range or not valid.

          + */ +export interface InvalidParameterValueException extends __SmithyException, $MetadataBearer { + name: "InvalidParameterValueException"; + $fault: "client"; + message?: string; +} + +export namespace InvalidParameterValueException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InvalidParameterValueException): any => ({ + ...obj, + }); +} + +/** + *

          The request must contain either a valid (registered) Amazon Web Services access key ID + * or X.509 certificate.

          + */ +export interface MissingAuthenticationToken extends __SmithyException, $MetadataBearer { + name: "MissingAuthenticationToken"; + $fault: "client"; + message?: string; +} + +export namespace MissingAuthenticationToken { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MissingAuthenticationToken): any => ({ + ...obj, + }); +} + +/** + *

          The account is not opted in to Compute Optimizer.

          + */ +export interface OptInRequiredException extends __SmithyException, $MetadataBearer { + name: "OptInRequiredException"; + $fault: "client"; + message?: string; +} + +export namespace OptInRequiredException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OptInRequiredException): any => ({ + ...obj, + }); +} + +/** + *

          A resource that is required for the action doesn't exist.

          + */ +export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { + name: "ResourceNotFoundException"; + $fault: "client"; + message?: string; +} + +export namespace ResourceNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ + ...obj, + }); +} + +/** + *

          The request has failed due to a temporary failure of the server.

          + */ +export interface ServiceUnavailableException extends __SmithyException, $MetadataBearer { + name: "ServiceUnavailableException"; + $fault: "server"; + message?: string; +} + +export namespace ServiceUnavailableException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServiceUnavailableException): any => ({ + ...obj, + }); +} + +/** + *

          The request was denied due to request throttling.

          + */ +export interface ThrottlingException extends __SmithyException, $MetadataBearer { + name: "ThrottlingException"; + $fault: "client"; + message: string | undefined; +} + +export namespace ThrottlingException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ThrottlingException): any => ({ + ...obj, + }); +} + export enum JobFilterName { JOB_STATUS = "JobStatus", RESOURCE_TYPE = "ResourceType", @@ -439,7 +847,6 @@ export enum JobFilterName { *

          Describes a filter that returns a more specific list of recommendation export jobs. * Use this filter with the DescribeRecommendationExportJobs * action.

          - * *

          You can use EBSFilter with the GetEBSVolumeRecommendations action, * LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and Filter with * the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

          @@ -447,10 +854,8 @@ export enum JobFilterName { export interface JobFilter { /** *

          The name of the filter.

          - * *

          Specify ResourceType to return export jobs of a specific resource type * (for example, Ec2Instance).

          - * *

Specify JobStatus to return export jobs with a specific status (e.g., * Complete).

          */ @@ -458,10 +863,8 @@ export interface JobFilter { /** *

          The value of the filter.

          - * *

          The valid values for this parameter are as follows, depending on what you specify for * the name parameter:

          - * *
            *
          • *

            Specify Ec2Instance or AutoScalingGroup if you @@ -491,9 +894,7 @@ export namespace JobFilter { export interface DescribeRecommendationExportJobsRequest { /** *

            The identification numbers of the export jobs to return.

            - * *

            An export job ID is returned when you create an export using the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions.

            - * *

            All export jobs created in the last seven days are returned if this parameter is * omitted.

            */ @@ -512,7 +913,6 @@ export interface DescribeRecommendationExportJobsRequest { /** *

            The maximum number of export jobs to return with a single request.

            - * *

            To retrieve the remaining results, make another request with the returned * nextToken value.

            */ @@ -541,14 +941,12 @@ export interface S3Destination { /** *

            The Amazon S3 bucket key of an export file.

            - * *

            The key uniquely identifies the object, or export file, in the S3 bucket.

            */ key?: string; /** *

            The Amazon S3 bucket key of a metadata file.

            - * *

            The key uniquely identifies the object, or metadata file, in the S3 bucket.

            */ metadataKey?: string; @@ -584,13 +982,6 @@ export namespace ExportDestination { }); } -export enum ResourceType { - AUTO_SCALING_GROUP = "AutoScalingGroup", - EBS_VOLUME = "EbsVolume", - EC2_INSTANCE = "Ec2Instance", - LAMBDA_FUNCTION = "LambdaFunction", -} - export enum JobStatus { COMPLETE = "Complete", FAILED = "Failed", @@ -600,12 +991,8 @@ export enum JobStatus { /** *

            Describes a recommendation export job.

            - * - * *

            Use the DescribeRecommendationExportJobs action to view your * recommendation export jobs.

            - * - * *

            Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your * recommendations.

            */ @@ -663,7 +1050,6 @@ export interface DescribeRecommendationExportJobsResponse { /** *

            The token to use to advance to the next page of export jobs.

            - * *

            This value is null when there are no more pages of export jobs to return.

            */ nextToken?: string; @@ -678,133 +1064,6 @@ export namespace DescribeRecommendationExportJobsResponse { }); } -/** - *

            An internal error has occurred. Try your call again.

            - */ -export interface InternalServerException extends __SmithyException, $MetadataBearer { - name: "InternalServerException"; - $fault: "server"; - message?: string; -} - -export namespace InternalServerException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: InternalServerException): any => ({ - ...obj, - }); -} - -/** - *

            The value supplied for the input parameter is out of range or not valid.

            - */ -export interface InvalidParameterValueException extends __SmithyException, $MetadataBearer { - name: "InvalidParameterValueException"; - $fault: "client"; - message?: string; -} - -export namespace InvalidParameterValueException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: InvalidParameterValueException): any => ({ - ...obj, - }); -} - -/** - *

            The request must contain either a valid (registered) Amazon Web Services access key ID - * or X.509 certificate.

            - */ -export interface MissingAuthenticationToken extends __SmithyException, $MetadataBearer { - name: "MissingAuthenticationToken"; - $fault: "client"; - message?: string; -} - -export namespace MissingAuthenticationToken { - /** - * @internal - */ - export const filterSensitiveLog = (obj: MissingAuthenticationToken): any => ({ - ...obj, - }); -} - -/** - *

            The account is not opted in to Compute Optimizer.

            - */ -export interface OptInRequiredException extends __SmithyException, $MetadataBearer { - name: "OptInRequiredException"; - $fault: "client"; - message?: string; -} - -export namespace OptInRequiredException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: OptInRequiredException): any => ({ - ...obj, - }); -} - -/** - *

            A resource that is required for the action doesn't exist.

            - */ -export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { - name: "ResourceNotFoundException"; - $fault: "client"; - message?: string; -} - -export namespace ResourceNotFoundException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ - ...obj, - }); -} - -/** - *

            The request has failed due to a temporary failure of the server.

            - */ -export interface ServiceUnavailableException extends __SmithyException, $MetadataBearer { - name: "ServiceUnavailableException"; - $fault: "server"; - message?: string; -} - -export namespace ServiceUnavailableException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ServiceUnavailableException): any => ({ - ...obj, - }); -} - -/** - *

            The request was denied due to request throttling.

            - */ -export interface ThrottlingException extends __SmithyException, $MetadataBearer { - name: "ThrottlingException"; - $fault: "client"; - message: string | undefined; -} - -export namespace ThrottlingException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ThrottlingException): any => ({ - ...obj, - }); -} - export enum ExportableAutoScalingGroupField { ACCOUNT_ID = "AccountId", AUTO_SCALING_GROUP_ARN = "AutoScalingGroupArn", @@ -816,10 +1075,13 @@ export enum ExportableAutoScalingGroupField { CURRENT_MEMORY = "CurrentMemory", CURRENT_NETWORK = "CurrentNetwork", CURRENT_ON_DEMAND_PRICE = "CurrentOnDemandPrice", + CURRENT_PERFORMANCE_RISK = "CurrentPerformanceRisk", CURRENT_STANDARD_ONE_YEAR_NO_UPFRONT_RESERVED_PRICE = "CurrentStandardOneYearNoUpfrontReservedPrice", CURRENT_STANDARD_THREE_YEAR_NO_UPFRONT_RESERVED_PRICE = "CurrentStandardThreeYearNoUpfrontReservedPrice", CURRENT_STORAGE = "CurrentStorage", CURRENT_VCPUS = "CurrentVCpus", + EFFECTIVE_RECOMMENDATION_PREFERENCES_CPU_VENDOR_ARCHITECTURES = "EffectiveRecommendationPreferencesCpuVendorArchitectures", + EFFECTIVE_RECOMMENDATION_PREFERENCES_ENHANCED_INFRASTRUCTURE_METRICS = "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics", FINDING = "Finding", LAST_REFRESH_TIMESTAMP = "LastRefreshTimestamp", LOOKBACK_PERIOD_IN_DAYS = "LookbackPeriodInDays", @@ -827,12 +1089,15 @@ export enum ExportableAutoScalingGroupField { RECOMMENDATION_OPTIONS_CONFIGURATION_INSTANCE_TYPE = "RecommendationOptionsConfigurationInstanceType", RECOMMENDATION_OPTIONS_CONFIGURATION_MAX_SIZE = "RecommendationOptionsConfigurationMaxSize", RECOMMENDATION_OPTIONS_CONFIGURATION_MIN_SIZE = "RecommendationOptionsConfigurationMinSize", + RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY = "RecommendationOptionsEstimatedMonthlySavingsCurrency", + RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE = "RecommendationOptionsEstimatedMonthlySavingsValue", RECOMMENDATION_OPTIONS_MEMORY = "RecommendationOptionsMemory", RECOMMENDATION_OPTIONS_NETWORK = "RecommendationOptionsNetwork", RECOMMENDATION_OPTIONS_ON_DEMAND_PRICE = "RecommendationOptionsOnDemandPrice", RECOMMENDATION_OPTIONS_PERFORMANCE_RISK = "RecommendationOptionsPerformanceRisk", RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_CPU_MAXIMUM = "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_MEMORY_MAXIMUM = "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", + RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE = "RecommendationOptionsSavingsOpportunityPercentage", RECOMMENDATION_OPTIONS_STANDARD_ONE_YEAR_NO_UPFRONT_RESERVED_PRICE = "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice", RECOMMENDATION_OPTIONS_STANDARD_THREE_YEAR_NO_UPFRONT_RESERVED_PRICE = "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice", RECOMMENDATION_OPTIONS_STORAGE = "RecommendationOptionsStorage", @@ -866,7 +1131,6 @@ export enum FilterName { /** *

            Describes a filter that returns a more specific list of recommendations. Use this * filter with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

            - * *

            You can use EBSFilter with the GetEBSVolumeRecommendations action, * LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and JobFilter with * the DescribeRecommendationExportJobs action.

            @@ -874,13 +1138,10 @@ export enum FilterName { export interface Filter { /** *

            The name of the filter.

            - * *

            Specify Finding to return recommendations with a specific finding * classification (for example, Underprovisioned).

            - * *

            Specify RecommendationSourceType to return recommendations of a specific * resource type (for example, Ec2Instance).

            - * *

            Specify FindingReasonCodes to return recommendations with a specific * finding reason code (for example, CPUUnderprovisioned).

            */ @@ -888,11 +1149,9 @@ export interface Filter { /** *

            The value of the filter.

            - * *

            The valid values for this parameter are as follows, depending on what you specify for * the name parameter and the resource type that you wish to filter results * for:

            - * *
              *
            • *

              Specify Optimized or NotOptimized if you specify the @@ -913,7 +1172,6 @@ export interface Filter { *

            • *

              Specify one of the following options if you specify the name * parameter as FindingReasonCodes:

              - * *
                *
              • *

                @@ -1069,20 +1327,13 @@ export namespace Filter { }); } -export enum CpuVendorArchitecture { - AWS_ARM64 = "AWS_ARM64", - CURRENT = "CURRENT", -} - /** - *

                Describes preferences for recommendations.

                + *

                Describes the recommendation preferences to return in the response of a GetAutoScalingGroupRecommendations, GetEC2InstanceRecommendations, and GetEC2RecommendationProjectedMetrics request.

                */ export interface RecommendationPreferences { /** *

                Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations.

                - * *

                For example, when you specify AWS_ARM64 with:

                - * *
                  *
                • *

                  A GetEC2InstanceRecommendations or GetAutoScalingGroupRecommendations request, Compute Optimizer @@ -1113,7 +1364,6 @@ export namespace RecommendationPreferences { /** *
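A hedged sketch of requesting AWS_ARM64-based candidates with these preferences follows; the recommendationPreferences request parameter name on GetEC2InstanceRecommendations is an assumption, since that request shape is not shown in this hunk.

```ts
// Sketch only: ask for Graviton-based (AWS_ARM64) candidate instance types.
// The recommendationPreferences request parameter name is an assumption.
import {
  ComputeOptimizerClient,
  GetEC2InstanceRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

const response = await client.send(
  new GetEC2InstanceRecommendationsCommand({
    recommendationPreferences: { cpuVendorArchitectures: ["AWS_ARM64"] },
  })
);
console.log(response);
```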

                  Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and * key prefix for a recommendations export job.

                  - * *

                  You must create the destination Amazon S3 bucket for your recommendations * export before you create the export job. Compute Optimizer does not create the S3 bucket * for you. After you create the S3 bucket, ensure that it has the required permission @@ -1148,16 +1398,12 @@ export interface ExportAutoScalingGroupRecommendationsRequest { /** *

                  The IDs of the Amazon Web Services accounts for which to export Auto Scaling group * recommendations.

                  - * *

                  If your account is the management account of an organization, use this parameter to * specify the member account for which you want to export recommendations.

                  - * *

                  This parameter cannot be specified together with the include member accounts * parameter. The parameters are mutually exclusive.

                  - * *

                  Recommendations for member accounts are not included in the export if this parameter, * or the include member accounts parameter, is omitted.

                  - * *

                  You can specify multiple account IDs per request.

                  */ accountIds?: string[]; @@ -1177,20 +1423,18 @@ export interface ExportAutoScalingGroupRecommendationsRequest { /** *

                  An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket * name and key prefix for the export job.

                  - * *

                  You must create the destination Amazon S3 bucket for your recommendations * export before you create the export job. Compute Optimizer does not create the S3 bucket * for you. After you create the S3 bucket, ensure that it has the required permissions - * policy to allow Compute Optimizer to write the export file to it. If you plan to - * specify an object prefix when you create the export job, you must include the object - * prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the + * policy to allow Compute Optimizer to write the export file to it. If you plan to specify + * an object prefix when you create the export job, you must include the object prefix in + * the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the * Compute Optimizer User Guide.

                  */ s3DestinationConfig: S3DestinationConfig | undefined; /** *

                  The format of the export file.

                  - * *

                  The only export file format currently supported is Csv.

                  */ fileFormat?: FileFormat | string; @@ -1198,18 +1442,14 @@ export interface ExportAutoScalingGroupRecommendationsRequest { /** *

                  Indicates whether to include recommendations for resources in all member accounts of * the organization if your account is the management account of an organization.

                  - * *

                  The member accounts must also be opted in to Compute Optimizer, and trusted access for * Compute Optimizer must be enabled in the organization account. For more information, * see Compute Optimizer and Amazon Web Services Organizations trusted access in the * Compute Optimizer User Guide.

                  - * *

                  Recommendations for member accounts of the organization are not included in the export * file if this parameter is omitted.

                  - * *

                  This parameter cannot be specified together with the account IDs parameter. The * parameters are mutually exclusive.

                  - * *

                  Recommendations for member accounts are not included in the export if this parameter, * or the account IDs parameter, is omitted.

                  */ @@ -1234,7 +1474,6 @@ export namespace ExportAutoScalingGroupRecommendationsRequest { export interface ExportAutoScalingGroupRecommendationsResponse { /** *

                  The identification number of the export job.

                  - * *

                  Use the DescribeRecommendationExportJobs action, and specify the job * ID to view the status of an export job.
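A rough end-to-end sketch of that workflow follows: start an export to an existing, correctly policied S3 bucket, then look the job up by its ID. The bucket/keyPrefix, jobId, jobIds, recommendationExportJobs, and status names are assumptions not shown in this hunk.

```ts
// Sketch only: start an Auto Scaling group export, then describe the job by ID.
// S3 destination and job-related field names are assumptions.
import {
  ComputeOptimizerClient,
  DescribeRecommendationExportJobsCommand,
  ExportAutoScalingGroupRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

const exportJob = await client.send(
  new ExportAutoScalingGroupRecommendationsCommand({
    s3DestinationConfig: { bucket: "my-export-bucket", keyPrefix: "compute-optimizer/" },
    fileFormat: "Csv",
    includeMemberAccounts: false,
  })
);

const jobs = await client.send(
  new DescribeRecommendationExportJobsCommand({ jobIds: [exportJob.jobId!] })
);
console.log(jobs.recommendationExportJobs?.[0]?.status);
```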

                  */ @@ -1283,6 +1522,7 @@ export enum ExportableVolumeField { CURRENT_CONFIGURATION_VOLUME_SIZE = "CurrentConfigurationVolumeSize", CURRENT_CONFIGURATION_VOLUME_TYPE = "CurrentConfigurationVolumeType", CURRENT_MONTHLY_PRICE = "CurrentMonthlyPrice", + CURRENT_PERFORMANCE_RISK = "CurrentPerformanceRisk", FINDING = "Finding", LAST_REFRESH_TIMESTAMP = "LastRefreshTimestamp", LOOKBACK_PERIOD_IN_DAYS = "LookbackPeriodInDays", @@ -1292,8 +1532,11 @@ export enum ExportableVolumeField { RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_BURST_THROUGHPUT = "RecommendationOptionsConfigurationVolumeBurstThroughput", RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_SIZE = "RecommendationOptionsConfigurationVolumeSize", RECOMMENDATION_OPTIONS_CONFIGURATION_VOLUME_TYPE = "RecommendationOptionsConfigurationVolumeType", + RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY = "RecommendationOptionsEstimatedMonthlySavingsCurrency", + RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE = "RecommendationOptionsEstimatedMonthlySavingsValue", RECOMMENDATION_OPTIONS_MONTHLY_PRICE = "RecommendationOptionsMonthlyPrice", RECOMMENDATION_OPTIONS_PERFORMANCE_RISK = "RecommendationOptionsPerformanceRisk", + RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE = "RecommendationOptionsSavingsOpportunityPercentage", UTILIZATION_METRICS_VOLUME_READ_BYTES_PER_SECOND_MAXIMUM = "UtilizationMetricsVolumeReadBytesPerSecondMaximum", UTILIZATION_METRICS_VOLUME_READ_OPS_PER_SECOND_MAXIMUM = "UtilizationMetricsVolumeReadOpsPerSecondMaximum", UTILIZATION_METRICS_VOLUME_WRITE_BYTES_PER_SECOND_MAXIMUM = "UtilizationMetricsVolumeWriteBytesPerSecondMaximum", @@ -1308,7 +1551,6 @@ export enum EBSFilterName { /** *

                  Describes a filter that returns a more specific list of Amazon Elastic Block Store * (Amazon EBS) volume recommendations. Use this filter with the GetEBSVolumeRecommendations action.

                  - * *

                  You can use LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, JobFilter with the * DescribeRecommendationExportJobs action, and Filter * with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

                  @@ -1316,7 +1558,6 @@ export enum EBSFilterName { export interface EBSFilter { /** *

                  The name of the filter.

                  - * *

                  Specify Finding to return recommendations with a specific finding * classification (for example, NotOptimized).

                  */ @@ -1324,7 +1565,6 @@ export interface EBSFilter { /** *

                  The value of the filter.

                  - * *

                  The valid values are Optimized, or NotOptimized.
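A short hedged sketch of applying this filter follows; the filters parameter name on the GetEBSVolumeRecommendations request is an assumption, since that request shape is not shown in this hunk.

```ts
// Sketch only: list EBS volumes flagged NotOptimized.
// The filters parameter name on the request is an assumption.
import {
  ComputeOptimizerClient,
  GetEBSVolumeRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

const response = await client.send(
  new GetEBSVolumeRecommendationsCommand({
    filters: [{ name: "Finding", values: ["NotOptimized"] }],
  })
);
console.log(response);
```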

                  */ values?: string[]; @@ -1343,16 +1583,12 @@ export interface ExportEBSVolumeRecommendationsRequest { /** *

                  The IDs of the Amazon Web Services accounts for which to export Amazon EBS * volume recommendations.

                  - * *

                  If your account is the management account of an organization, use this parameter to * specify the member account for which you want to export recommendations.

                  - * *

                  This parameter cannot be specified together with the include member accounts * parameter. The parameters are mutually exclusive.

                  - * *

                  Recommendations for member accounts are not included in the export if this parameter, * or the include member accounts parameter, is omitted.

                  - * *

                  You can specify multiple account IDs per request.

                  */ accountIds?: string[]; @@ -1372,7 +1608,6 @@ export interface ExportEBSVolumeRecommendationsRequest { /** *

                  Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and * key prefix for a recommendations export job.

                  - * *

                  You must create the destination Amazon S3 bucket for your recommendations * export before you create the export job. Compute Optimizer does not create the S3 bucket * for you. After you create the S3 bucket, ensure that it has the required permission @@ -1385,7 +1620,6 @@ export interface ExportEBSVolumeRecommendationsRequest { /** *

                  The format of the export file.

                  - * *

                  The only export file format currently supported is Csv.

                  */ fileFormat?: FileFormat | string; @@ -1393,18 +1627,14 @@ export interface ExportEBSVolumeRecommendationsRequest { /** *

                  Indicates whether to include recommendations for resources in all member accounts of * the organization if your account is the management account of an organization.

                  - * *

                  The member accounts must also be opted in to Compute Optimizer, and trusted access for * Compute Optimizer must be enabled in the organization account. For more information, * see Compute Optimizer and Amazon Web Services Organizations trusted access in the * Compute Optimizer User Guide.

                  - * *

                  Recommendations for member accounts of the organization are not included in the export * file if this parameter is omitted.

                  - * *

                  This parameter cannot be specified together with the account IDs parameter. The * parameters are mutually exclusive.

                  - * *

                  Recommendations for member accounts are not included in the export if this parameter, * or the account IDs parameter, is omitted.

                  */ @@ -1423,7 +1653,6 @@ export namespace ExportEBSVolumeRecommendationsRequest { export interface ExportEBSVolumeRecommendationsResponse { /** *

                  The identification number of the export job.

                  - * *

                  Use the DescribeRecommendationExportJobs action, and specify the job * ID to view the status of an export job.

                  */ @@ -1451,10 +1680,13 @@ export enum ExportableInstanceField { CURRENT_MEMORY = "CurrentMemory", CURRENT_NETWORK = "CurrentNetwork", CURRENT_ON_DEMAND_PRICE = "CurrentOnDemandPrice", + CURRENT_PERFORMANCE_RISK = "CurrentPerformanceRisk", CURRENT_STANDARD_ONE_YEAR_NO_UPFRONT_RESERVED_PRICE = "CurrentStandardOneYearNoUpfrontReservedPrice", CURRENT_STANDARD_THREE_YEAR_NO_UPFRONT_RESERVED_PRICE = "CurrentStandardThreeYearNoUpfrontReservedPrice", CURRENT_STORAGE = "CurrentStorage", CURRENT_VCPUS = "CurrentVCpus", + EFFECTIVE_RECOMMENDATION_PREFERENCES_CPU_VENDOR_ARCHITECTURES = "EffectiveRecommendationPreferencesCpuVendorArchitectures", + EFFECTIVE_RECOMMENDATION_PREFERENCES_ENHANCED_INFRASTRUCTURE_METRICS = "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics", FINDING = "Finding", Finding_Reason_Codes = "FindingReasonCodes", INSTANCE_ARN = "InstanceArn", @@ -1463,6 +1695,8 @@ export enum ExportableInstanceField { LOOKBACK_PERIOD_IN_DAYS = "LookbackPeriodInDays", RECOMMENDATIONS_SOURCES_RECOMMENDATION_SOURCE_ARN = "RecommendationsSourcesRecommendationSourceArn", RECOMMENDATIONS_SOURCES_RECOMMENDATION_SOURCE_TYPE = "RecommendationsSourcesRecommendationSourceType", + RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY = "RecommendationOptionsEstimatedMonthlySavingsCurrency", + RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE = "RecommendationOptionsEstimatedMonthlySavingsValue", RECOMMENDATION_OPTIONS_INSTANCE_TYPE = "RecommendationOptionsInstanceType", RECOMMENDATION_OPTIONS_MEMORY = "RecommendationOptionsMemory", RECOMMENDATION_OPTIONS_NETWORK = "RecommendationOptionsNetwork", @@ -1471,6 +1705,7 @@ export enum ExportableInstanceField { RECOMMENDATION_OPTIONS_PLATFORM_DIFFERENCES = "RecommendationOptionsPlatformDifferences", RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_CPU_MAXIMUM = "RecommendationOptionsProjectedUtilizationMetricsCpuMaximum", RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_MEMORY_MAXIMUM = "RecommendationOptionsProjectedUtilizationMetricsMemoryMaximum", + RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE = "RecommendationOptionsSavingsOpportunityPercentage", RECOMMENDATION_OPTIONS_STANDARD_ONE_YEAR_NO_UPFRONT_RESERVED_PRICE = "RecommendationOptionsStandardOneYearNoUpfrontReservedPrice", RECOMMENDATION_OPTIONS_STANDARD_THREE_YEAR_NO_UPFRONT_RESERVED_PRICE = "RecommendationOptionsStandardThreeYearNoUpfrontReservedPrice", RECOMMENDATION_OPTIONS_STORAGE = "RecommendationOptionsStorage", @@ -1495,16 +1730,12 @@ export interface ExportEC2InstanceRecommendationsRequest { /** *

                  The IDs of the Amazon Web Services accounts for which to export instance * recommendations.

                  - * *

                  If your account is the management account of an organization, use this parameter to * specify the member account for which you want to export recommendations.

                  - * *

                  This parameter cannot be specified together with the include member accounts * parameter. The parameters are mutually exclusive.

                  - * *

                  Recommendations for member accounts are not included in the export if this parameter, * or the include member accounts parameter, is omitted.

                  - * *

                  You can specify multiple account IDs per request.

                  */ accountIds?: string[]; @@ -1525,20 +1756,19 @@ export interface ExportEC2InstanceRecommendationsRequest { /** *

                  An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket * name and key prefix for the export job.

                  - * *

                  You must create the destination Amazon S3 bucket for your recommendations * export before you create the export job. Compute Optimizer does not create the S3 bucket * for you. After you create the S3 bucket, ensure that it has the required permissions - * policy policy to allow Compute Optimizer to write the export file to it. If you plan to + * policy to allow Compute Optimizer to write the export file to it. + * If you plan to * specify an object prefix when you create the export job, you must include the object - * prefix in the that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the + * prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the * Compute Optimizer User Guide.

                  */ s3DestinationConfig: S3DestinationConfig | undefined; /** *

                  The format of the export file.

                  - * *

                  The only export file format currently supported is Csv.

                  */ fileFormat?: FileFormat | string; @@ -1546,15 +1776,12 @@ export interface ExportEC2InstanceRecommendationsRequest { /** *

                  Indicates whether to include recommendations for resources in all member accounts of * the organization if your account is the management account of an organization.

                  - * *

                  The member accounts must also be opted in to Compute Optimizer, and trusted access for * Compute Optimizer must be enabled in the organization account. For more information, * see Compute Optimizer and Amazon Web Services Organizations trusted access in the * Compute Optimizer User Guide.

                  - * *

                  Recommendations for member accounts of the organization are not included in the export * file if this parameter is omitted.

                  - * *

                  Recommendations for member accounts are not included in the export if this parameter, * or the account IDs parameter, is omitted.

                  */ @@ -1579,7 +1806,6 @@ export namespace ExportEC2InstanceRecommendationsRequest { export interface ExportEC2InstanceRecommendationsResponse { /** *

                  The identification number of the export job.

                  - * *

                  Use the DescribeRecommendationExportJobs action, and specify the job * ID to view the status of an export job.

                  */ @@ -1607,6 +1833,7 @@ export enum ExportableLambdaFunctionField { CURRENT_CONFIGURATION_TIMEOUT = "CurrentConfigurationTimeout", CURRENT_COST_AVERAGE = "CurrentCostAverage", CURRENT_COST_TOTAL = "CurrentCostTotal", + CURRENT_PERFORMANCE_RISK = "CurrentPerformanceRisk", FINDING = "Finding", FINDING_REASON_CODES = "FindingReasonCodes", FUNCTION_ARN = "FunctionArn", @@ -1617,9 +1844,12 @@ export enum ExportableLambdaFunctionField { RECOMMENDATION_OPTIONS_CONFIGURATION_MEMORY_SIZE = "RecommendationOptionsConfigurationMemorySize", RECOMMENDATION_OPTIONS_COST_HIGH = "RecommendationOptionsCostHigh", RECOMMENDATION_OPTIONS_COST_LOW = "RecommendationOptionsCostLow", + RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY = "RecommendationOptionsEstimatedMonthlySavingsCurrency", + RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE = "RecommendationOptionsEstimatedMonthlySavingsValue", RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_DURATION_EXPECTED = "RecommendationOptionsProjectedUtilizationMetricsDurationExpected", RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_DURATION_LOWER_BOUND = "RecommendationOptionsProjectedUtilizationMetricsDurationLowerBound", RECOMMENDATION_OPTIONS_PROJECTED_UTILIZATION_METRICS_DURATION_UPPER_BOUND = "RecommendationOptionsProjectedUtilizationMetricsDurationUpperBound", + RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE = "RecommendationOptionsSavingsOpportunityPercentage", UTILIZATION_METRICS_DURATION_AVERAGE = "UtilizationMetricsDurationAverage", UTILIZATION_METRICS_DURATION_MAXIMUM = "UtilizationMetricsDurationMaximum", UTILIZATION_METRICS_MEMORY_AVERAGE = "UtilizationMetricsMemoryAverage", @@ -1634,7 +1864,6 @@ export enum LambdaFunctionRecommendationFilterName { /** *

                  Describes a filter that returns a more specific list of Lambda * function recommendations. Use this filter with the GetLambdaFunctionRecommendations action.

                  - * *

                  You can use EBSFilter with the GetEBSVolumeRecommendations action, JobFilter with the * DescribeRecommendationExportJobs action, and Filter * with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

                  @@ -1642,10 +1871,8 @@ export enum LambdaFunctionRecommendationFilterName { export interface LambdaFunctionRecommendationFilter { /** *

                  The name of the filter.

                  - * *

                  Specify Finding to return recommendations with a specific finding * classification (for example, NotOptimized).

                  - * *

                  Specify FindingReasonCode to return recommendations with a specific * finding reason code (for example, MemoryUnderprovisioned).

                  */ @@ -1653,10 +1880,8 @@ export interface LambdaFunctionRecommendationFilter { /** *

                  The value of the filter.

                  - * *

                  The valid values for this parameter are as follows, depending on what you specify for * the name parameter:

                  - * *
                    *
                  • *

                    Specify Optimized, NotOptimized, or @@ -1687,16 +1912,12 @@ export interface ExportLambdaFunctionRecommendationsRequest { /** *

                    The IDs of the Amazon Web Services accounts for which to export Lambda * function recommendations.

                    - * *

                    If your account is the management account of an organization, use this parameter to * specify the member account for which you want to export recommendations.

                    - * *

                    This parameter cannot be specified together with the include member accounts * parameter. The parameters are mutually exclusive.

                    - * *

                    Recommendations for member accounts are not included in the export if this parameter, * or the include member accounts parameter, is omitted.

                    - * *

                    You can specify multiple account IDs per request.

                    */ accountIds?: string[]; @@ -1716,7 +1937,6 @@ export interface ExportLambdaFunctionRecommendationsRequest { /** *

                    Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and * key prefix for a recommendations export job.

                    - * *

                    You must create the destination Amazon S3 bucket for your recommendations * export before you create the export job. Compute Optimizer does not create the S3 bucket * for you. After you create the S3 bucket, ensure that it has the required permission @@ -1729,7 +1949,6 @@ export interface ExportLambdaFunctionRecommendationsRequest { /** *

                    The format of the export file.

                    - * *

                    The only export file format currently supported is Csv.

                    */ fileFormat?: FileFormat | string; @@ -1737,18 +1956,14 @@ export interface ExportLambdaFunctionRecommendationsRequest { /** *

                    Indicates whether to include recommendations for resources in all member accounts of * the organization if your account is the management account of an organization.

                    - * *

                    The member accounts must also be opted in to Compute Optimizer, and trusted access for * Compute Optimizer must be enabled in the organization account. For more information, * see Compute Optimizer and Amazon Web Services Organizations trusted access in the * Compute Optimizer User Guide.

                    - * *

                    Recommendations for member accounts of the organization are not included in the export * file if this parameter is omitted.

                    - * *

                    This parameter cannot be specified together with the account IDs parameter. The * parameters are mutually exclusive.

                    - * *

                    Recommendations for member accounts are not included in the export if this parameter, * or the account IDs parameter, is omitted.

                    */ @@ -1767,7 +1982,6 @@ export namespace ExportLambdaFunctionRecommendationsRequest { export interface ExportLambdaFunctionRecommendationsResponse { /** *

                    The identification number of the export job.

                    - * *

                    Use the DescribeRecommendationExportJobs action, and specify the job * ID to view the status of an export job.

                    */ @@ -1793,11 +2007,9 @@ export interface GetAutoScalingGroupRecommendationsRequest { /** *
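A hedged sketch of the export flow described above: start an export to an existing S3 bucket, then look the job up by ID. The bucket name and key prefix are placeholders, and member names not shown in this hunk (jobIds, recommendationExportJobs, status) are assumed from the generated model.

import {
  ComputeOptimizerClient,
  DescribeRecommendationExportJobsCommand,
  ExportLambdaFunctionRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function exportLambdaRecommendations() {
  // The destination bucket must already exist and grant Compute Optimizer
  // write access, as noted above; "my-export-bucket" is a placeholder.
  const { jobId } = await client.send(
    new ExportLambdaFunctionRecommendationsCommand({
      s3DestinationConfig: { bucket: "my-export-bucket", keyPrefix: "compute-optimizer/" },
      fileFormat: "Csv",
      includeMemberAccounts: true, // mutually exclusive with accountIds
    })
  );
  if (!jobId) return undefined;

  // Check the job status by ID, as the response documentation suggests.
  const { recommendationExportJobs } = await client.send(
    new DescribeRecommendationExportJobsCommand({ jobIds: [jobId] })
  );
  return recommendationExportJobs?.[0]?.status;
}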

                    The ID of the Amazon Web Services account for which to return Auto Scaling group * recommendations.

                    - * *

                    If your account is the management account of an organization, use this parameter to * specify the member account for which you want to return Auto Scaling group * recommendations.

                    - * *

                    Only one account ID can be specified per request.

                    */ accountIds?: string[]; @@ -1817,7 +2029,6 @@ export interface GetAutoScalingGroupRecommendationsRequest { /** *

                    The maximum number of Auto Scaling group recommendations to return with a single * request.

                    - * *

                    To retrieve the remaining results, make another request with the returned * nextToken value.

                    */ @@ -1846,7 +2057,6 @@ export namespace GetAutoScalingGroupRecommendationsRequest { /** *
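The maxResults/nextToken contract described above is the usual paginate-until-null pattern. A minimal sketch follows; the autoScalingGroupRecommendations response member is not shown in this hunk and is assumed from the generated model.

import {
  AutoScalingGroupRecommendation,
  ComputeOptimizerClient,
  GetAutoScalingGroupRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function getAllAsgRecommendations(): Promise<AutoScalingGroupRecommendation[]> {
  const recommendations: AutoScalingGroupRecommendation[] = [];
  let nextToken: string | undefined;
  do {
    // Keep requesting pages until nextToken comes back null/undefined.
    const page = await client.send(
      new GetAutoScalingGroupRecommendationsCommand({ maxResults: 100, nextToken })
    );
    recommendations.push(...(page.autoScalingGroupRecommendations ?? []));
    nextToken = page.nextToken;
  } while (nextToken);
  return recommendations;
}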

                    Describes an error experienced when getting recommendations.

                    - * *

                    For example, an error is returned if you request recommendations for an unsupported * Auto Scaling group, or if you request recommendations for an instance of an * unsupported instance family.

                    @@ -1881,7 +2091,6 @@ export interface GetAutoScalingGroupRecommendationsResponse { /** *

                    The token to use to advance to the next page of Auto Scaling group * recommendations.

                    - * *

                    This value is null when there are no more pages of Auto Scaling group * recommendations to return.

                    */ @@ -1894,7 +2103,6 @@ export interface GetAutoScalingGroupRecommendationsResponse { /** *

                    An array of objects that describe errors of the request.

                    - * *

                    For example, an error is returned if you request recommendations for an unsupported * Auto Scaling group.

                    */ @@ -1924,7 +2132,6 @@ export interface GetEBSVolumeRecommendationsRequest { /** *

                    The maximum number of volume recommendations to return with a single request.

                    - * *

                    To retrieve the remaining results, make another request with the returned * nextToken value.

                    */ @@ -1939,10 +2146,8 @@ export interface GetEBSVolumeRecommendationsRequest { /** *

                    The ID of the Amazon Web Services account for which to return volume * recommendations.

                    - * *

                    If your account is the management account of an organization, use this parameter to * specify the member account for which you want to return volume recommendations.

                    - * *

                    Only one account ID can be specified per request.

                    */ accountIds?: string[]; @@ -1964,7 +2169,6 @@ export namespace GetEBSVolumeRecommendationsRequest { export interface VolumeConfiguration { /** *

                    The volume type.

                    - * *

                    This can be gp2 for General Purpose SSD, io1 or * io2 for Provisioned IOPS SSD, st1 for Throughput Optimized * HDD, sc1 for Cold HDD, or standard for Magnetic @@ -2022,7 +2226,6 @@ export enum EBSMetricName { /** *

                    Describes a utilization metric of an Amazon Elastic Block Store (Amazon EBS) * volume.

                    - * *

                    Compare the utilization metric data of your resource against its projected utilization * metric data to determine the performance difference between your current resource and * the recommended option.

                    @@ -2030,7 +2233,6 @@ export enum EBSMetricName { export interface EBSUtilizationMetric { /** *

                    The name of the utilization metric.

                    - * *

                    The following utilization metrics are available:

                    *
                      *
                    • @@ -2063,11 +2265,9 @@ export interface EBSUtilizationMetric { /** *

                      The statistic of the utilization metric.

                      - * *

                      The Compute Optimizer API, Command Line Interface (CLI), and SDKs * return utilization metrics using only the Maximum statistic, which is the * highest value observed during the specified period.

                      - * *

                      The Compute Optimizer console displays graphs for some utilization metrics using the * Average statistic, which is the value of Sum / * SampleCount during the specified period. For more information, see @@ -2106,10 +2306,8 @@ export interface VolumeRecommendationOption { /** *

                      The performance risk of the volume recommendation option.

                      - * *

                      Performance risk is the likelihood of the recommended volume type not meeting the * performance requirement of your workload.

                      - * *

                      The value ranges from 0 - 4, with 0 meaning * that the recommended resource is predicted to always provide enough hardware capability. * The higher the performance risk is, the more likely you should validate whether the @@ -2120,10 +2318,16 @@ export interface VolumeRecommendationOption { /** *

                      The rank of the volume recommendation option.

                      - * *

                      The top recommendation option is ranked as 1.

                      */ rank?: number; + + /** + *

                      An object that describes the savings opportunity for the EBS volume recommendation + * option. Savings opportunity includes the estimated monthly savings amount and + * percentage.

                      + */ + savingsOpportunity?: SavingsOpportunity; } export namespace VolumeRecommendationOption { @@ -2156,7 +2360,6 @@ export interface VolumeRecommendation { /** *

                      The finding classification of the volume.

                      - * *

                      Findings for volumes include:

                      *
                        *
                      • @@ -2199,9 +2402,16 @@ export interface VolumeRecommendation { volumeRecommendationOptions?: VolumeRecommendationOption[]; /** - *

                        The timestamp of when the volume recommendation was last refreshed.

                        + *

                        The timestamp of when the volume recommendation was last generated.

                        */ lastRefreshTimestamp?: Date; + + /** + *

                        The risk of the current EBS volume not meeting the performance needs of its workloads. + * The higher the risk, the more likely the current EBS volume doesn't have sufficient + * capacity.

                        + */ + currentPerformanceRisk?: CurrentPerformanceRisk | string; } export namespace VolumeRecommendation { @@ -2216,7 +2426,6 @@ export namespace VolumeRecommendation { export interface GetEBSVolumeRecommendationsResponse { /** *

                        The token to use to advance to the next page of volume recommendations.

                        - * *

                        This value is null when there are no more pages of volume recommendations to * return.

                        */ @@ -2229,7 +2438,6 @@ export interface GetEBSVolumeRecommendationsResponse { /** *

                        An array of objects that describe errors of the request.

                        - * *

                        For example, an error is returned if you request recommendations for an unsupported * volume.

                        */ @@ -2259,7 +2467,6 @@ export interface GetEC2InstanceRecommendationsRequest { /** *
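A short sketch that reads the fields this update adds to the EBS volume recommendation shapes (currentPerformanceRisk on the recommendation, savingsOpportunity on each option). The savingsOpportunityPercentage member and the volumeRecommendations response member are assumed from the generated model; they are not shown in this hunk.

import {
  ComputeOptimizerClient,
  GetEBSVolumeRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function summarizeVolumeRecommendations() {
  const { volumeRecommendations } = await client.send(
    new GetEBSVolumeRecommendationsCommand({ maxResults: 100 })
  );
  for (const rec of volumeRecommendations ?? []) {
    // The top recommendation option is ranked as 1, per the documentation above.
    const top = rec.volumeRecommendationOptions?.find((option) => option.rank === 1);
    console.log(
      rec.volumeArn,
      rec.finding,
      rec.currentPerformanceRisk, // added in this update
      top?.savingsOpportunity?.savingsOpportunityPercentage // added in this update
    );
  }
}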

                        The maximum number of instance recommendations to return with a single request.

                        - * *

                        To retrieve the remaining results, make another request with the returned * nextToken value.

                        */ @@ -2274,10 +2481,8 @@ export interface GetEC2InstanceRecommendationsRequest { /** *

                        The ID of the Amazon Web Services account for which to return instance * recommendations.

                        - * *

                        If your account is the management account of an organization, use this parameter to * specify the member account for which you want to return instance recommendations.

                        - * *

                        Only one account ID can be specified per request.

                        */ accountIds?: string[]; @@ -2338,7 +2543,6 @@ export interface InstanceRecommendationOption { /** *

                        An array of objects that describe the projected utilization metrics of the instance * recommendation option.

                        - * * *

                        The Cpu and Memory metrics are the only projected * utilization metrics returned. Additionally, the Memory metric is @@ -2356,9 +2560,7 @@ export interface InstanceRecommendationOption { * Change the instance type guide for Linux and Change the instance type * guide for Windows provide general guidance for getting started with an * instance migration.

                        - * *

                        Platform differences include:

                        - * *
                          *
                        • *

                          @@ -2453,7 +2655,6 @@ export interface InstanceRecommendationOption { /** *

                          The performance risk of the instance recommendation option.

                          - * *

                          Performance risk indicates the likelihood of the recommended instance type not meeting * the resource needs of your workload. Compute Optimizer calculates an individual * performance risk score for each specification of the recommended instance, including @@ -2462,7 +2663,6 @@ export interface InstanceRecommendationOption { * The performance * risk of the recommended instance is calculated as the maximum performance risk score * across the analyzed resource specifications.

                          - * *

                          The value ranges from 0 - 4, with 0 meaning * that the recommended resource is predicted to always provide enough hardware capability. * The higher the performance risk is, the more likely you should validate whether the @@ -2473,10 +2673,16 @@ export interface InstanceRecommendationOption { /** *

                          The rank of the instance recommendation option.

                          - * *

                          The top recommendation option is ranked as 1.

                          */ rank?: number; + + /** + *

                          An object that describes the savings opportunity for the instance recommendation + * option. Savings opportunity includes the estimated monthly savings amount and + * percentage.

                          + */ + savingsOpportunity?: SavingsOpportunity; } export namespace InstanceRecommendationOption { @@ -2546,9 +2752,7 @@ export interface InstanceRecommendation { /** *

                          The finding classification of the instance.

                          - * *

                          Findings for instances include:

                          - * *
                            *
                          • *

                            @@ -2587,9 +2791,7 @@ export interface InstanceRecommendation { /** *

                            The reason for the finding classification of the instance.

                            - * *

                            Finding reason codes for instances include:

                            - * *
                              *
                            • *

                              @@ -2630,7 +2832,6 @@ export interface InstanceRecommendation { * your workload and there is an alternative instance type that provides better * memory performance. This is identified by analyzing the memory utilization * metric of the current instance during the look-back period.

                              - * * *

                              Memory utilization is analyzed only for resources that have the unified * CloudWatch agent installed on them. For more information, see @@ -2776,7 +2977,6 @@ export interface InstanceRecommendation { * current instance during the look-back period.

                              *
                            • *
                            - * * *

                            For more information about instance metrics, see List the * available CloudWatch metrics for your instances in the @@ -2810,9 +3010,22 @@ export interface InstanceRecommendation { recommendationSources?: RecommendationSource[]; /** - *

                            The timestamp of when the instance recommendation was last refreshed.

                            + *

                            The timestamp of when the instance recommendation was last generated.

                            */ lastRefreshTimestamp?: Date; + + /** + *

                            The risk of the current instance not meeting the performance needs of its workloads. + * The higher the risk, the more likely the current instance doesn't have + * sufficient capacity for its workloads.

                            + */ + currentPerformanceRisk?: CurrentPerformanceRisk | string; + + /** + *

                            An object that describes the effective recommendation preferences for the + * instance.

                            + */ + effectiveRecommendationPreferences?: EffectiveRecommendationPreferences; } export namespace InstanceRecommendation { @@ -2827,7 +3040,6 @@ export namespace InstanceRecommendation { export interface GetEC2InstanceRecommendationsResponse { /** *
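The sketch below reads the members this update adds to InstanceRecommendation (currentPerformanceRisk, effectiveRecommendationPreferences) and to its options (savingsOpportunity). Member names that do not appear in this hunk, such as instanceRecommendations, recommendationOptions, and estimatedMonthlySavings, are assumed from the generated model.

import {
  ComputeOptimizerClient,
  GetEC2InstanceRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function inspectInstanceRecommendations(instanceArns: string[]) {
  const { instanceRecommendations } = await client.send(
    new GetEC2InstanceRecommendationsCommand({ instanceArns })
  );
  for (const rec of instanceRecommendations ?? []) {
    const top = rec.recommendationOptions?.find((option) => option.rank === 1);
    console.log({
      instanceArn: rec.instanceArn,
      finding: rec.finding,
      // Members added in this update:
      currentPerformanceRisk: rec.currentPerformanceRisk,
      enhancedInfrastructureMetrics:
        rec.effectiveRecommendationPreferences?.enhancedInfrastructureMetrics,
      topOptionMonthlySavings: top?.savingsOpportunity?.estimatedMonthlySavings?.value,
    });
  }
}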

                            The token to use to advance to the next page of instance recommendations.

                            - * *

                            This value is null when there are no more pages of instance recommendations to * return.

                            */ @@ -2840,7 +3052,6 @@ export interface GetEC2InstanceRecommendationsResponse { /** *

                            An array of objects that describe errors of the request.

                            - * *

                            For example, an error is returned if you request recommendations for an instance of an * unsupported instance family.

                            */ @@ -2903,11 +3114,9 @@ export namespace GetEC2RecommendationProjectedMetricsRequest { *

                            Describes a projected utilization metric of a recommendation option, such as an * Amazon EC2 instance. This represents the projected utilization of a * recommendation option had you used that resource during the analyzed period.

                            - * *

                            Compare the utilization metric data of your resource against its projected utilization * metric data to determine the performance difference between your current resource and * the recommended option.

                            - * * *

                            The Cpu and Memory metrics are the only projected * utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the @@ -2918,7 +3127,6 @@ export namespace GetEC2RecommendationProjectedMetricsRequest { export interface ProjectedMetric { /** *

                            The name of the projected utilization metric.

                            - * *

                            The following projected utilization metrics are returned:

                            *
                              *
                            • @@ -2972,7 +3180,6 @@ export namespace ProjectedMetric { /** *

                              Describes a projected utilization metric of a recommendation option.

                              - * * *

                              The Cpu and Memory metrics are the only projected * utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the @@ -2988,9 +3195,7 @@ export interface RecommendedOptionProjectedMetric { /** *

                              The rank of the recommendation option projected metric.

                              - * *

                              The top recommendation option is ranked as 1.

                              - * *

                              The projected metric rank correlates to the recommendation option rank. For example, * the projected metric ranked as 1 is related to the recommendation option * that is also ranked as 1 in the same response.

                              @@ -3028,6 +3233,48 @@ export namespace GetEC2RecommendationProjectedMetricsResponse { }); } +export interface GetEffectiveRecommendationPreferencesRequest { + /** + *

                              The Amazon Resource Name (ARN) of the resource for which to confirm effective + * recommendation preferences. Only EC2 instance and Auto Scaling group ARNs are + * currently supported.

                              + */ + resourceArn: string | undefined; +} + +export namespace GetEffectiveRecommendationPreferencesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEffectiveRecommendationPreferencesRequest): any => ({ + ...obj, + }); +} + +export interface GetEffectiveRecommendationPreferencesResponse { + /** + *

                              The status of the enhanced infrastructure metrics recommendation preference. Considers + * all applicable preferences that you might have set at the resource, account, and + * organization level.

                              + *

                              A status of Active confirms that the preference is applied in the latest + * recommendation refresh, and a status of Inactive confirms that it's not yet + * applied.

                              + *

                              To validate whether the preference is applied to your last generated set of + * recommendations, review the effectiveRecommendationPreferences value in the + * response of the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

                              + */ + enhancedInfrastructureMetrics?: EnhancedInfrastructureMetrics | string; +} + +export namespace GetEffectiveRecommendationPreferencesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEffectiveRecommendationPreferencesResponse): any => ({ + ...obj, + }); +} + export interface GetEnrollmentStatusRequest {} export namespace GetEnrollmentStatusRequest { @@ -3047,7 +3294,6 @@ export interface GetEnrollmentStatusResponse { /** *
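A minimal sketch of the new GetEffectiveRecommendationPreferences operation, whose request and response shapes are defined directly above. The region and ARN are supplied by the caller.

import {
  ComputeOptimizerClient,
  GetEffectiveRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function checkEffectivePreferences(resourceArn: string) {
  // Only EC2 instance and Auto Scaling group ARNs are supported, per the docs above.
  const { enhancedInfrastructureMetrics } = await client.send(
    new GetEffectiveRecommendationPreferencesCommand({ resourceArn })
  );
  // "Active" means the preference is applied in the latest recommendation refresh.
  return enhancedInfrastructureMetrics;
}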

                              The reason for the enrollment status of the account.

                              - * *

                              For example, an account might show a status of Pending because member * accounts of an organization require more time to be enrolled in the service.

                              */ @@ -3093,7 +3339,6 @@ export enum EnrollmentFilterName { export interface EnrollmentFilter { /** *

                              The name of the filter.

                              - * *

                              Specify Status to return accounts with a specific enrollment status (for * example, Active).

                              */ @@ -3101,7 +3346,6 @@ export interface EnrollmentFilter { /** *

                              The value of the filter.

                              - * *

                              The valid values are Active, Inactive, Pending, * and Failed.

                              */ @@ -3132,7 +3376,6 @@ export interface GetEnrollmentStatusesForOrganizationRequest { /** *

                              The maximum number of account enrollment statuses to return with a single request. You * can specify up to 100 statuses to return with each request.

                              - * *

                              To retrieve the remaining results, make another request with the returned * nextToken value.

                              */ @@ -3157,7 +3400,6 @@ export interface GetEnrollmentStatusesForOrganizationResponse { /** *

                              The token to use to advance to the next page of account enrollment statuses.

                              - * *

                              This value is null when there are no more pages of account enrollment statuses to * return.

                              */ @@ -3177,7 +3419,6 @@ export interface GetLambdaFunctionRecommendationsRequest { /** *

                              The Amazon Resource Name (ARN) of the functions for which to return * recommendations.

                              - * *

                              You can specify a qualified or unqualified ARN. If you specify an unqualified ARN * without a function version suffix, Compute Optimizer will return recommendations for the * latest ($LATEST) version of the function. If you specify a qualified ARN @@ -3191,10 +3432,8 @@ export interface GetLambdaFunctionRecommendationsRequest { /** *

                              The ID of the Amazon Web Services account for which to return function * recommendations.

                              - * *

                              If your account is the management account of an organization, use this parameter to * specify the member account for which you want to return function recommendations.

                              - * *

                              Only one account ID can be specified per request.

                              */ accountIds?: string[]; @@ -3212,7 +3451,6 @@ export interface GetLambdaFunctionRecommendationsRequest { /** *

                              The maximum number of function recommendations to return with a single request.

                              - * *

                              To retrieve the remaining results, make another request with the returned * nextToken value.

                              */ @@ -3287,7 +3525,6 @@ export namespace LambdaFunctionMemoryProjectedMetric { export interface LambdaFunctionMemoryRecommendationOption { /** *

                              The rank of the function recommendation option.

                              - * *

                              The top recommendation option is ranked as 1.

                              */ rank?: number; @@ -3302,6 +3539,13 @@ export interface LambdaFunctionMemoryRecommendationOption { * recommendation option.

                              */ projectedUtilizationMetrics?: LambdaFunctionMemoryProjectedMetric[]; + + /** + *

                              An object that describes the savings opportunity for the Lambda function + * recommendation option. Savings opportunity includes the estimated monthly savings amount + * and percentage.

                              + */ + savingsOpportunity?: SavingsOpportunity; } export namespace LambdaFunctionMemoryRecommendationOption { @@ -3329,7 +3573,6 @@ export enum LambdaFunctionMetricStatistic { export interface LambdaFunctionUtilizationMetric { /** *

                              The name of the utilization metric.

                              - * *

                              The following utilization metrics are available:

                              *
                                *
                              • @@ -3347,11 +3590,9 @@ export interface LambdaFunctionUtilizationMetric { /** *

                                The statistic of the utilization metric.

                                - * *

                                The Compute Optimizer API, Command Line Interface (CLI), and SDKs * return utilization metrics using only the Maximum statistic, which is the * highest value observed during the specified period.

                                - * *

                                The Compute Optimizer console displays graphs for some utilization metrics using the * Average statistic, which is the value of Sum / * SampleCount during the specified period. For more information, see @@ -3419,13 +3660,12 @@ export interface LambdaFunctionRecommendation { lookbackPeriodInDays?: number; /** - *

                                The timestamp of when the function recommendation was last refreshed.

                                + *

                                The timestamp of when the function recommendation was last generated.

                                */ lastRefreshTimestamp?: Date; /** *

                                The finding classification of the function.

                                - * *

                                Findings for functions include:

                                *
                                  *
                                • @@ -3529,6 +3769,13 @@ export interface LambdaFunctionRecommendation { * the function.

                                  */ memorySizeRecommendationOptions?: LambdaFunctionMemoryRecommendationOption[]; + + /** + *

                                  The risk of the current Lambda function not meeting the performance needs + * of its workloads. The higher the risk, the more likely the current Lambda + * function configuration is underperforming in its workload.

                                  + */ + currentPerformanceRisk?: CurrentPerformanceRisk | string; } export namespace LambdaFunctionRecommendation { @@ -3543,7 +3790,6 @@ export namespace LambdaFunctionRecommendation { export interface GetLambdaFunctionRecommendationsResponse { /** *

                                  The token to use to advance to the next page of function recommendations.

                                  - * *

                                  This value is null when there are no more pages of function recommendations to * return.

                                  */ @@ -3564,14 +3810,118 @@ export namespace GetLambdaFunctionRecommendationsResponse { }); } +export interface GetRecommendationPreferencesRequest { + /** + *

                                  The target resource type of the recommendation preference for which to return + * preferences.

                                  + *

                                  The Ec2Instance option encompasses standalone instances and instances + * that are part of Auto Scaling groups. The AutoScalingGroup option + * encompasses only instances that are part of an Auto Scaling group.

                                  + */ + resourceType: ResourceType | string | undefined; + + /** + *

                                  An object that describes the scope of the recommendation preference to return.

                                  + *

                                  You can return recommendation preferences that are created at the organization level + * (for management accounts of an organization only), account level, and resource level. + * For more information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.

                                  + */ + scope?: Scope; + + /** + *

                                  The token to advance to the next page of recommendation preferences.

                                  + */ + nextToken?: string; + + /** + *

                                  The maximum number of recommendation preferences to return with a single + * request.

                                  + *

                                  To retrieve the remaining results, make another request with the returned + * nextToken value.

                                  + */ + maxResults?: number; +} + +export namespace GetRecommendationPreferencesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetRecommendationPreferencesRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Describes a recommendation preference.

                                  + */ +export interface RecommendationPreferencesDetail { + /** + *

                                  An object that describes the scope of the recommendation preference.

                                  + *

                                  Recommendation preferences can be created at the organization level (for management + * accounts of an organization only), account level, and resource level. For more + * information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.

                                  + */ + scope?: Scope; + + /** + *

                                  The target resource type of the recommendation preference to create.

                                  + *

                                  The Ec2Instance option encompasses standalone instances and instances + * that are part of Auto Scaling groups. The AutoScalingGroup option + * encompasses only instances that are part of an Auto Scaling group.

                                  + */ + resourceType?: ResourceType | string; + + /** + *

                                  The status of the enhanced infrastructure metrics recommendation preference.

                                  + *

                                  A status of Active confirms that the preference is applied in the latest + * recommendation refresh, and a status of Inactive confirms that it's not yet + * applied.

                                  + */ + enhancedInfrastructureMetrics?: EnhancedInfrastructureMetrics | string; +} + +export namespace RecommendationPreferencesDetail { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RecommendationPreferencesDetail): any => ({ + ...obj, + }); +} + +export interface GetRecommendationPreferencesResponse { + /** + *

                                  The token to use to advance to the next page of recommendation preferences.

                                  + *

                                  This value is null when there are no more pages of recommendation preferences to + * return.

                                  + */ + nextToken?: string; + + /** + *

                                  An array of objects that describe recommendation preferences.

                                  + */ + recommendationPreferencesDetails?: RecommendationPreferencesDetail[]; +} + +export namespace GetRecommendationPreferencesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetRecommendationPreferencesResponse): any => ({ + ...obj, + }); +} + export interface GetRecommendationSummariesRequest { /** *
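A sketch of the new GetRecommendationPreferences operation defined above, listing preferences for the Ec2Instance resource type. The scope member names (name, value) follow the "scope name"/"scope value" wording in the documentation and should be treated as assumptions.

import {
  ComputeOptimizerClient,
  GetRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function listEc2RecommendationPreferences() {
  const { recommendationPreferencesDetails } = await client.send(
    new GetRecommendationPreferencesCommand({ resourceType: "Ec2Instance" })
  );
  for (const detail of recommendationPreferencesDetails ?? []) {
    // Each detail reports where the preference was set and its current status.
    console.log(detail.scope?.name, detail.scope?.value, detail.enhancedInfrastructureMetrics);
  }
}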

                                  The ID of the Amazon Web Services account for which to return recommendation * summaries.

                                  - * *

                                  If your account is the management account of an organization, use this parameter to * specify the member account for which you want to return recommendation summaries.

                                  - * *

                                  Only one account ID can be specified per request.

                                  */ accountIds?: string[]; @@ -3583,7 +3933,6 @@ export interface GetRecommendationSummariesRequest { /** *

                                  The maximum number of recommendation summaries to return with a single request.

                                  - * *

                                  To retrieve the remaining results, make another request with the returned * nextToken value.

                                  */ @@ -3599,6 +3948,44 @@ export namespace GetRecommendationSummariesRequest { }); } +/** + *

                                  Describes the performance risk ratings for a given resource type.

                                  + *

                                  Resources with a high or medium rating are at risk of not + * meeting the performance needs of their workloads, while resources with a + * low rating are performing well in their workloads.

                                  + */ +export interface CurrentPerformanceRiskRatings { + /** + *

                                  A count of the applicable resource types with a high performance risk rating.

                                  + */ + high?: number; + + /** + *

                                  A count of the applicable resource types with a medium performance risk rating.

                                  + */ + medium?: number; + + /** + *

                                  A count of the applicable resource types with a low performance risk rating.

                                  + */ + low?: number; + + /** + *

                                  A count of the applicable resource types with a very low performance risk + * rating.

                                  + */ + veryLow?: number; +} + +export namespace CurrentPerformanceRiskRatings { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CurrentPerformanceRiskRatings): any => ({ + ...obj, + }); +} + export enum FindingReasonCode { MEMORY_OVER_PROVISIONED = "MemoryOverprovisioned", MEMORY_UNDER_PROVISIONED = "MemoryUnderprovisioned", @@ -3667,7 +4054,7 @@ export interface RecommendationSummary { summaries?: Summary[]; /** - *

                                  The resource type of the recommendation.

                                  + *

                                  The resource type that the recommendation summary applies to.

                                  */ recommendationResourceType?: RecommendationSourceType | string; @@ -3675,6 +4062,18 @@ export interface RecommendationSummary { *

                                  The Amazon Web Services account ID of the recommendation summary.

                                  */ accountId?: string; + + /** + *

                                  An object that describes the savings opportunity for a given resource type. Savings + * opportunity includes the estimated monthly savings amount and percentage.

                                  + */ + savingsOpportunity?: SavingsOpportunity; + + /** + *

                                  An object that describes the performance risk ratings for a given resource + * type.

                                  + */ + currentPerformanceRiskRatings?: CurrentPerformanceRiskRatings; } export namespace RecommendationSummary { @@ -3689,7 +4088,6 @@ export namespace RecommendationSummary { export interface GetRecommendationSummariesResponse { /** *
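Recommendation summaries now carry the savings opportunity and performance risk rating objects added above; a small sketch that prints both. The recommendationSummaries and savingsOpportunityPercentage member names are assumed from the generated model.

import {
  ComputeOptimizerClient,
  GetRecommendationSummariesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function printRiskAndSavingsSummaries() {
  const { recommendationSummaries } = await client.send(new GetRecommendationSummariesCommand({}));
  for (const summary of recommendationSummaries ?? []) {
    const ratings = summary.currentPerformanceRiskRatings; // added in this update
    console.log(
      summary.recommendationResourceType,
      `high=${ratings?.high ?? 0} medium=${ratings?.medium ?? 0} low=${ratings?.low ?? 0} veryLow=${ratings?.veryLow ?? 0}`,
      summary.savingsOpportunity?.savingsOpportunityPercentage // added in this update
    );
  }
}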

                                  The token to use to advance to the next page of recommendation summaries.

                                  - * *

                                  This value is null when there are no more pages of recommendation summaries to * return.

                                  */ @@ -3710,12 +4108,67 @@ export namespace GetRecommendationSummariesResponse { }); } +export interface PutRecommendationPreferencesRequest { + /** + *

                                  The target resource type of the recommendation preference to create.

                                  + *

                                  The Ec2Instance option encompasses standalone instances and instances + * that are part of Auto Scaling groups. The AutoScalingGroup option + * encompasses only instances that are part of an Auto Scaling group.

                                  + */ + resourceType: ResourceType | string | undefined; + + /** + *

                                  An object that describes the scope of the recommendation preference to create.

                                  + *

                                  You can create recommendation preferences at the organization level (for management + * accounts of an organization only), account level, and resource level. For more + * information, see Activating + * enhanced infrastructure metrics in the Compute Optimizer User + * Guide.

                                  + * + *

                                  You cannot create recommendation preferences for Auto Scaling groups at the + * organization and account levels. You can create recommendation preferences for + * Auto Scaling groups only at the resource level by specifying a scope name + * of ResourceArn and a scope value of the Auto Scaling group Amazon + * Resource Name (ARN). This will configure the preference for all instances that are + * part of the specified Auto Scaling group.

                                  + *
                                  + */ + scope?: Scope; + + /** + *

                                  The status of the enhanced infrastructure metrics recommendation preference to create + * or update.

                                  + *

                                  A status of Active confirms that the preference is applied in the latest + * recommendation refresh, and a status of Inactive confirms that it's not yet + * applied.

                                  + */ + enhancedInfrastructureMetrics?: EnhancedInfrastructureMetrics | string; +} + +export namespace PutRecommendationPreferencesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutRecommendationPreferencesRequest): any => ({ + ...obj, + }); +} + +export interface PutRecommendationPreferencesResponse {} + +export namespace PutRecommendationPreferencesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutRecommendationPreferencesResponse): any => ({ + ...obj, + }); +} + export interface UpdateEnrollmentStatusRequest { /** *
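A minimal sketch of PutRecommendationPreferences for the Auto Scaling group case called out above (resource-level scope only). The scope member names (name, value) are assumed from the "scope name"/"scope value" wording; the ARN comes from the caller.

import {
  ComputeOptimizerClient,
  PutRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function activateEnhancedMetricsForAsg(autoScalingGroupArn: string) {
  // Auto Scaling group preferences can only be created at the resource level,
  // so the scope must be a ResourceArn pointing at the group.
  await client.send(
    new PutRecommendationPreferencesCommand({
      resourceType: "AutoScalingGroup",
      scope: { name: "ResourceArn", value: autoScalingGroupArn },
      enhancedInfrastructureMetrics: "Active",
    })
  );
}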

                                  The new enrollment status of the account.

                                  - * *

                                  The following status options are available:

                                  - * *
                                    *
                                  • *

                                    @@ -3731,7 +4184,6 @@ export interface UpdateEnrollmentStatusRequest { * from Compute Optimizer after you opt out.

                                    *
                                  • *
                                  - * * *

                                  The Pending and Failed options cannot be used to update * the enrollment status of an account. They are returned in the response of a request diff --git a/clients/client-compute-optimizer/src/protocols/Aws_json1_0.ts b/clients/client-compute-optimizer/src/protocols/Aws_json1_0.ts index 61f03a3c98d2..1492eb3c7133 100644 --- a/clients/client-compute-optimizer/src/protocols/Aws_json1_0.ts +++ b/clients/client-compute-optimizer/src/protocols/Aws_json1_0.ts @@ -18,6 +18,10 @@ import { SmithyException as __SmithyException, } from "@aws-sdk/types"; +import { + DeleteRecommendationPreferencesCommandInput, + DeleteRecommendationPreferencesCommandOutput, +} from "../commands/DeleteRecommendationPreferencesCommand"; import { DescribeRecommendationExportJobsCommandInput, DescribeRecommendationExportJobsCommandOutput, @@ -54,6 +58,10 @@ import { GetEC2RecommendationProjectedMetricsCommandInput, GetEC2RecommendationProjectedMetricsCommandOutput, } from "../commands/GetEC2RecommendationProjectedMetricsCommand"; +import { + GetEffectiveRecommendationPreferencesCommandInput, + GetEffectiveRecommendationPreferencesCommandOutput, +} from "../commands/GetEffectiveRecommendationPreferencesCommand"; import { GetEnrollmentStatusCommandInput, GetEnrollmentStatusCommandOutput, @@ -66,10 +74,18 @@ import { GetLambdaFunctionRecommendationsCommandInput, GetLambdaFunctionRecommendationsCommandOutput, } from "../commands/GetLambdaFunctionRecommendationsCommand"; +import { + GetRecommendationPreferencesCommandInput, + GetRecommendationPreferencesCommandOutput, +} from "../commands/GetRecommendationPreferencesCommand"; import { GetRecommendationSummariesCommandInput, GetRecommendationSummariesCommandOutput, } from "../commands/GetRecommendationSummariesCommand"; +import { + PutRecommendationPreferencesCommandInput, + PutRecommendationPreferencesCommandOutput, +} from "../commands/PutRecommendationPreferencesCommand"; import { UpdateEnrollmentStatusCommandInput, UpdateEnrollmentStatusCommandOutput, @@ -81,11 +97,16 @@ import { AutoScalingGroupRecommendation, AutoScalingGroupRecommendationOption, CpuVendorArchitecture, + CurrentPerformanceRiskRatings, + DeleteRecommendationPreferencesRequest, + DeleteRecommendationPreferencesResponse, DescribeRecommendationExportJobsRequest, DescribeRecommendationExportJobsResponse, EBSFilter, EBSUtilizationMetric, + EffectiveRecommendationPreferences, EnrollmentFilter, + EstimatedMonthlySavings, ExportableAutoScalingGroupField, ExportableInstanceField, ExportableLambdaFunctionField, @@ -108,6 +129,8 @@ import { GetEC2InstanceRecommendationsResponse, GetEC2RecommendationProjectedMetricsRequest, GetEC2RecommendationProjectedMetricsResponse, + GetEffectiveRecommendationPreferencesRequest, + GetEffectiveRecommendationPreferencesResponse, GetEnrollmentStatusesForOrganizationRequest, GetEnrollmentStatusesForOrganizationResponse, GetEnrollmentStatusRequest, @@ -115,6 +138,8 @@ import { GetLambdaFunctionRecommendationsRequest, GetLambdaFunctionRecommendationsResponse, GetRecommendationError, + GetRecommendationPreferencesRequest, + GetRecommendationPreferencesResponse, GetRecommendationSummariesRequest, GetRecommendationSummariesResponse, InstanceRecommendation, @@ -134,15 +159,21 @@ import { OptInRequiredException, PlatformDifference, ProjectedMetric, + PutRecommendationPreferencesRequest, + PutRecommendationPreferencesResponse, ReasonCodeSummary, RecommendationExportJob, + RecommendationPreferenceName, RecommendationPreferences, + 
RecommendationPreferencesDetail, RecommendationSource, RecommendationSummary, RecommendedOptionProjectedMetric, ResourceNotFoundException, S3Destination, S3DestinationConfig, + SavingsOpportunity, + Scope, ServiceUnavailableException, Summary, ThrottlingException, @@ -154,6 +185,19 @@ import { VolumeRecommendationOption, } from "../models/models_0"; +export const serializeAws_json1_0DeleteRecommendationPreferencesCommand = async ( + input: DeleteRecommendationPreferencesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "ComputeOptimizerService.DeleteRecommendationPreferences", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0DeleteRecommendationPreferencesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_0DescribeRecommendationExportJobsCommand = async ( input: DescribeRecommendationExportJobsCommandInput, context: __SerdeContext @@ -271,6 +315,19 @@ export const serializeAws_json1_0GetEC2RecommendationProjectedMetricsCommand = a return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_0GetEffectiveRecommendationPreferencesCommand = async ( + input: GetEffectiveRecommendationPreferencesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "ComputeOptimizerService.GetEffectiveRecommendationPreferences", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0GetEffectiveRecommendationPreferencesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_0GetEnrollmentStatusCommand = async ( input: GetEnrollmentStatusCommandInput, context: __SerdeContext @@ -310,6 +367,19 @@ export const serializeAws_json1_0GetLambdaFunctionRecommendationsCommand = async return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_0GetRecommendationPreferencesCommand = async ( + input: GetRecommendationPreferencesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "ComputeOptimizerService.GetRecommendationPreferences", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0GetRecommendationPreferencesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_0GetRecommendationSummariesCommand = async ( input: GetRecommendationSummariesCommandInput, context: __SerdeContext @@ -323,6 +393,19 @@ export const serializeAws_json1_0GetRecommendationSummariesCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_0PutRecommendationPreferencesCommand = async ( + input: PutRecommendationPreferencesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.0", + "x-amz-target": "ComputeOptimizerService.PutRecommendationPreferences", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_0PutRecommendationPreferencesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const 
serializeAws_json1_0UpdateEnrollmentStatusCommand = async ( input: UpdateEnrollmentStatusCommandInput, context: __SerdeContext @@ -336,6 +419,116 @@ export const serializeAws_json1_0UpdateEnrollmentStatusCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const deserializeAws_json1_0DeleteRecommendationPreferencesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0DeleteRecommendationPreferencesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0DeleteRecommendationPreferencesResponse(data, context); + const response: DeleteRecommendationPreferencesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0DeleteRecommendationPreferencesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.computeoptimizer#AccessDeniedException": + response = { + ...(await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.computeoptimizer#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidParameterValueException": + case "com.amazonaws.computeoptimizer#InvalidParameterValueException": + response = { + ...(await deserializeAws_json1_0InvalidParameterValueExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "MissingAuthenticationToken": + case "com.amazonaws.computeoptimizer#MissingAuthenticationToken": + response = { + ...(await deserializeAws_json1_0MissingAuthenticationTokenResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OptInRequiredException": + case "com.amazonaws.computeoptimizer#OptInRequiredException": + response = { + ...(await deserializeAws_json1_0OptInRequiredExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.computeoptimizer#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceUnavailableException": + case "com.amazonaws.computeoptimizer#ServiceUnavailableException": + response = { + ...(await deserializeAws_json1_0ServiceUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.computeoptimizer#ThrottlingException": + response = { + ...(await deserializeAws_json1_0ThrottlingExceptionResponse(parsedOutput, 
context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_0DescribeRecommendationExportJobsCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1219,24 +1412,338 @@ const deserializeAws_json1_0GetEC2InstanceRecommendationsCommandError = async ( export const deserializeAws_json1_0GetEC2RecommendationProjectedMetricsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0GetEC2RecommendationProjectedMetricsCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0GetEC2RecommendationProjectedMetricsResponse(data, context); + const response: GetEC2RecommendationProjectedMetricsCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0GetEC2RecommendationProjectedMetricsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.computeoptimizer#AccessDeniedException": + response = { + ...(await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.computeoptimizer#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidParameterValueException": + case "com.amazonaws.computeoptimizer#InvalidParameterValueException": + response = { + ...(await deserializeAws_json1_0InvalidParameterValueExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "MissingAuthenticationToken": + case "com.amazonaws.computeoptimizer#MissingAuthenticationToken": + response = { + ...(await deserializeAws_json1_0MissingAuthenticationTokenResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OptInRequiredException": + case "com.amazonaws.computeoptimizer#OptInRequiredException": + response = { + ...(await deserializeAws_json1_0OptInRequiredExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.computeoptimizer#ResourceNotFoundException": + response = { + ...(await 
deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceUnavailableException": + case "com.amazonaws.computeoptimizer#ServiceUnavailableException": + response = { + ...(await deserializeAws_json1_0ServiceUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.computeoptimizer#ThrottlingException": + response = { + ...(await deserializeAws_json1_0ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0GetEffectiveRecommendationPreferencesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0GetEffectiveRecommendationPreferencesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0GetEffectiveRecommendationPreferencesResponse(data, context); + const response: GetEffectiveRecommendationPreferencesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0GetEffectiveRecommendationPreferencesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.computeoptimizer#AccessDeniedException": + response = { + ...(await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.computeoptimizer#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidParameterValueException": + case "com.amazonaws.computeoptimizer#InvalidParameterValueException": + response = { + ...(await deserializeAws_json1_0InvalidParameterValueExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "MissingAuthenticationToken": + case "com.amazonaws.computeoptimizer#MissingAuthenticationToken": + response = { + ...(await deserializeAws_json1_0MissingAuthenticationTokenResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OptInRequiredException": + case 
"com.amazonaws.computeoptimizer#OptInRequiredException": + response = { + ...(await deserializeAws_json1_0OptInRequiredExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.computeoptimizer#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceUnavailableException": + case "com.amazonaws.computeoptimizer#ServiceUnavailableException": + response = { + ...(await deserializeAws_json1_0ServiceUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.computeoptimizer#ThrottlingException": + response = { + ...(await deserializeAws_json1_0ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0GetEnrollmentStatusCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_0GetEnrollmentStatusCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_0GetEnrollmentStatusResponse(data, context); + const response: GetEnrollmentStatusCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_0GetEnrollmentStatusCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.computeoptimizer#AccessDeniedException": + response = { + ...(await deserializeAws_json1_0AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.computeoptimizer#InternalServerException": + response = { + ...(await deserializeAws_json1_0InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidParameterValueException": + case "com.amazonaws.computeoptimizer#InvalidParameterValueException": + response = { + ...(await deserializeAws_json1_0InvalidParameterValueExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "MissingAuthenticationToken": + case 
"com.amazonaws.computeoptimizer#MissingAuthenticationToken": + response = { + ...(await deserializeAws_json1_0MissingAuthenticationTokenResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceUnavailableException": + case "com.amazonaws.computeoptimizer#ServiceUnavailableException": + response = { + ...(await deserializeAws_json1_0ServiceUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.computeoptimizer#ThrottlingException": + response = { + ...(await deserializeAws_json1_0ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_0GetEnrollmentStatusesForOrganizationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_0GetEC2RecommendationProjectedMetricsCommandError(output, context); + return deserializeAws_json1_0GetEnrollmentStatusesForOrganizationCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_0GetEC2RecommendationProjectedMetricsResponse(data, context); - const response: GetEC2RecommendationProjectedMetricsCommandOutput = { + contents = deserializeAws_json1_0GetEnrollmentStatusesForOrganizationResponse(data, context); + const response: GetEnrollmentStatusesForOrganizationCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_0GetEC2RecommendationProjectedMetricsCommandError = async ( +const deserializeAws_json1_0GetEnrollmentStatusesForOrganizationCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1277,22 +1784,6 @@ const deserializeAws_json1_0GetEC2RecommendationProjectedMetricsCommandError = a $metadata: deserializeMetadata(output), }; break; - case "OptInRequiredException": - case "com.amazonaws.computeoptimizer#OptInRequiredException": - response = { - ...(await deserializeAws_json1_0OptInRequiredExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ResourceNotFoundException": - case "com.amazonaws.computeoptimizer#ResourceNotFoundException": - response = { - ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "ServiceUnavailableException": case "com.amazonaws.computeoptimizer#ServiceUnavailableException": response = { @@ -1326,27 +1817,27 @@ const deserializeAws_json1_0GetEC2RecommendationProjectedMetricsCommandError = a return Promise.reject(Object.assign(new 
Error(message), response)); }; -export const deserializeAws_json1_0GetEnrollmentStatusCommand = async ( +export const deserializeAws_json1_0GetLambdaFunctionRecommendationsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_0GetEnrollmentStatusCommandError(output, context); + return deserializeAws_json1_0GetLambdaFunctionRecommendationsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_0GetEnrollmentStatusResponse(data, context); - const response: GetEnrollmentStatusCommandOutput = { + contents = deserializeAws_json1_0GetLambdaFunctionRecommendationsResponse(data, context); + const response: GetLambdaFunctionRecommendationsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_0GetEnrollmentStatusCommandError = async ( +const deserializeAws_json1_0GetLambdaFunctionRecommendationsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1379,6 +1870,14 @@ const deserializeAws_json1_0GetEnrollmentStatusCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "LimitExceededException": + case "com.amazonaws.computeoptimizer#LimitExceededException": + response = { + ...(await deserializeAws_json1_0LimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "MissingAuthenticationToken": case "com.amazonaws.computeoptimizer#MissingAuthenticationToken": response = { @@ -1387,6 +1886,14 @@ const deserializeAws_json1_0GetEnrollmentStatusCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "OptInRequiredException": + case "com.amazonaws.computeoptimizer#OptInRequiredException": + response = { + ...(await deserializeAws_json1_0OptInRequiredExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "ServiceUnavailableException": case "com.amazonaws.computeoptimizer#ServiceUnavailableException": response = { @@ -1420,27 +1927,27 @@ const deserializeAws_json1_0GetEnrollmentStatusCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_0GetEnrollmentStatusesForOrganizationCommand = async ( +export const deserializeAws_json1_0GetRecommendationPreferencesCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_0GetEnrollmentStatusesForOrganizationCommandError(output, context); + return deserializeAws_json1_0GetRecommendationPreferencesCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_0GetEnrollmentStatusesForOrganizationResponse(data, context); - const response: GetEnrollmentStatusesForOrganizationCommandOutput = { + contents = deserializeAws_json1_0GetRecommendationPreferencesResponse(data, context); + const response: GetRecommendationPreferencesCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_0GetEnrollmentStatusesForOrganizationCommandError = 
async ( +const deserializeAws_json1_0GetRecommendationPreferencesCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1481,6 +1988,22 @@ const deserializeAws_json1_0GetEnrollmentStatusesForOrganizationCommandError = a $metadata: deserializeMetadata(output), }; break; + case "OptInRequiredException": + case "com.amazonaws.computeoptimizer#OptInRequiredException": + response = { + ...(await deserializeAws_json1_0OptInRequiredExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.computeoptimizer#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "ServiceUnavailableException": case "com.amazonaws.computeoptimizer#ServiceUnavailableException": response = { @@ -1514,27 +2037,27 @@ const deserializeAws_json1_0GetEnrollmentStatusesForOrganizationCommandError = a return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_0GetLambdaFunctionRecommendationsCommand = async ( +export const deserializeAws_json1_0GetRecommendationSummariesCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_0GetLambdaFunctionRecommendationsCommandError(output, context); + return deserializeAws_json1_0GetRecommendationSummariesCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_0GetLambdaFunctionRecommendationsResponse(data, context); - const response: GetLambdaFunctionRecommendationsCommandOutput = { + contents = deserializeAws_json1_0GetRecommendationSummariesResponse(data, context); + const response: GetRecommendationSummariesCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_0GetLambdaFunctionRecommendationsCommandError = async ( +const deserializeAws_json1_0GetRecommendationSummariesCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1567,14 +2090,6 @@ const deserializeAws_json1_0GetLambdaFunctionRecommendationsCommandError = async $metadata: deserializeMetadata(output), }; break; - case "LimitExceededException": - case "com.amazonaws.computeoptimizer#LimitExceededException": - response = { - ...(await deserializeAws_json1_0LimitExceededExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "MissingAuthenticationToken": case "com.amazonaws.computeoptimizer#MissingAuthenticationToken": response = { @@ -1624,27 +2139,27 @@ const deserializeAws_json1_0GetLambdaFunctionRecommendationsCommandError = async return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_0GetRecommendationSummariesCommand = async ( +export const deserializeAws_json1_0PutRecommendationPreferencesCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return 
deserializeAws_json1_0GetRecommendationSummariesCommandError(output, context); + return deserializeAws_json1_0PutRecommendationPreferencesCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_0GetRecommendationSummariesResponse(data, context); - const response: GetRecommendationSummariesCommandOutput = { + contents = deserializeAws_json1_0PutRecommendationPreferencesResponse(data, context); + const response: PutRecommendationPreferencesCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_0GetRecommendationSummariesCommandError = async ( +const deserializeAws_json1_0PutRecommendationPreferencesCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1693,6 +2208,14 @@ const deserializeAws_json1_0GetRecommendationSummariesCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceNotFoundException": + case "com.amazonaws.computeoptimizer#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_0ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "ServiceUnavailableException": case "com.amazonaws.computeoptimizer#ServiceUnavailableException": response = { @@ -1991,6 +2514,24 @@ const serializeAws_json1_0CpuVendorArchitectures = ( }); }; +const serializeAws_json1_0DeleteRecommendationPreferencesRequest = ( + input: DeleteRecommendationPreferencesRequest, + context: __SerdeContext +): any => { + return { + ...(input.recommendationPreferenceNames !== undefined && + input.recommendationPreferenceNames !== null && { + recommendationPreferenceNames: serializeAws_json1_0RecommendationPreferenceNames( + input.recommendationPreferenceNames, + context + ), + }), + ...(input.resourceType !== undefined && input.resourceType !== null && { resourceType: input.resourceType }), + ...(input.scope !== undefined && + input.scope !== null && { scope: serializeAws_json1_0Scope(input.scope, context) }), + }; +}; + const serializeAws_json1_0DescribeRecommendationExportJobsRequest = ( input: DescribeRecommendationExportJobsRequest, context: __SerdeContext @@ -2334,6 +2875,15 @@ const serializeAws_json1_0GetEC2RecommendationProjectedMetricsRequest = ( }; }; +const serializeAws_json1_0GetEffectiveRecommendationPreferencesRequest = ( + input: GetEffectiveRecommendationPreferencesRequest, + context: __SerdeContext +): any => { + return { + ...(input.resourceArn !== undefined && input.resourceArn !== null && { resourceArn: input.resourceArn }), + }; +}; + const serializeAws_json1_0GetEnrollmentStatusesForOrganizationRequest = ( input: GetEnrollmentStatusesForOrganizationRequest, context: __SerdeContext @@ -2371,6 +2921,19 @@ const serializeAws_json1_0GetLambdaFunctionRecommendationsRequest = ( }; }; +const serializeAws_json1_0GetRecommendationPreferencesRequest = ( + input: GetRecommendationPreferencesRequest, + context: __SerdeContext +): any => { + return { + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + ...(input.resourceType !== undefined && input.resourceType !== null && { resourceType: input.resourceType }), + 
...(input.scope !== undefined && + input.scope !== null && { scope: serializeAws_json1_0Scope(input.scope, context) }), + }; +}; + const serializeAws_json1_0GetRecommendationSummariesRequest = ( input: GetRecommendationSummariesRequest, context: __SerdeContext @@ -2449,6 +3012,35 @@ const serializeAws_json1_0LambdaFunctionRecommendationFilters = ( }); }; +const serializeAws_json1_0PutRecommendationPreferencesRequest = ( + input: PutRecommendationPreferencesRequest, + context: __SerdeContext +): any => { + return { + ...(input.enhancedInfrastructureMetrics !== undefined && + input.enhancedInfrastructureMetrics !== null && { + enhancedInfrastructureMetrics: input.enhancedInfrastructureMetrics, + }), + ...(input.resourceType !== undefined && input.resourceType !== null && { resourceType: input.resourceType }), + ...(input.scope !== undefined && + input.scope !== null && { scope: serializeAws_json1_0Scope(input.scope, context) }), + }; +}; + +const serializeAws_json1_0RecommendationPreferenceNames = ( + input: (RecommendationPreferenceName | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_json1_0RecommendationPreferences = ( input: RecommendationPreferences, context: __SerdeContext @@ -2468,6 +3060,13 @@ const serializeAws_json1_0S3DestinationConfig = (input: S3DestinationConfig, con }; }; +const serializeAws_json1_0Scope = (input: Scope, context: __SerdeContext): any => { + return { + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.value !== undefined && input.value !== null && { value: input.value }), + }; +}; + const serializeAws_json1_0UpdateEnrollmentStatusRequest = ( input: UpdateEnrollmentStatusRequest, context: __SerdeContext @@ -2549,6 +3148,11 @@ const deserializeAws_json1_0AutoScalingGroupRecommendation = ( output.currentConfiguration !== undefined && output.currentConfiguration !== null ? deserializeAws_json1_0AutoScalingGroupConfiguration(output.currentConfiguration, context) : undefined, + currentPerformanceRisk: __expectString(output.currentPerformanceRisk), + effectiveRecommendationPreferences: + output.effectiveRecommendationPreferences !== undefined && output.effectiveRecommendationPreferences !== null + ? deserializeAws_json1_0EffectiveRecommendationPreferences(output.effectiveRecommendationPreferences, context) + : undefined, finding: __expectString(output.finding), lastRefreshTimestamp: output.lastRefreshTimestamp !== undefined && output.lastRefreshTimestamp !== null @@ -2581,6 +3185,10 @@ const deserializeAws_json1_0AutoScalingGroupRecommendationOption = ( ? deserializeAws_json1_0ProjectedUtilizationMetrics(output.projectedUtilizationMetrics, context) : undefined, rank: __expectInt32(output.rank), + savingsOpportunity: + output.savingsOpportunity !== undefined && output.savingsOpportunity !== null + ? 
deserializeAws_json1_0SavingsOpportunity(output.savingsOpportunity, context) + : undefined, } as any; }; @@ -2612,6 +3220,39 @@ const deserializeAws_json1_0AutoScalingGroupRecommendations = ( }); }; +const deserializeAws_json1_0CpuVendorArchitectures = ( + output: any, + context: __SerdeContext +): (CpuVendorArchitecture | string)[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_json1_0CurrentPerformanceRiskRatings = ( + output: any, + context: __SerdeContext +): CurrentPerformanceRiskRatings => { + return { + high: __expectLong(output.high), + low: __expectLong(output.low), + medium: __expectLong(output.medium), + veryLow: __expectLong(output.veryLow), + } as any; +}; + +const deserializeAws_json1_0DeleteRecommendationPreferencesResponse = ( + output: any, + context: __SerdeContext +): DeleteRecommendationPreferencesResponse => { + return {} as any; +}; + const deserializeAws_json1_0DescribeRecommendationExportJobsResponse = ( output: any, context: __SerdeContext @@ -2644,6 +3285,29 @@ const deserializeAws_json1_0EBSUtilizationMetrics = (output: any, context: __Ser }); }; +const deserializeAws_json1_0EffectiveRecommendationPreferences = ( + output: any, + context: __SerdeContext +): EffectiveRecommendationPreferences => { + return { + cpuVendorArchitectures: + output.cpuVendorArchitectures !== undefined && output.cpuVendorArchitectures !== null + ? deserializeAws_json1_0CpuVendorArchitectures(output.cpuVendorArchitectures, context) + : undefined, + enhancedInfrastructureMetrics: __expectString(output.enhancedInfrastructureMetrics), + } as any; +}; + +const deserializeAws_json1_0EstimatedMonthlySavings = ( + output: any, + context: __SerdeContext +): EstimatedMonthlySavings => { + return { + currency: __expectString(output.currency), + value: __limitedParseDouble(output.value), + } as any; +}; + const deserializeAws_json1_0ExportAutoScalingGroupRecommendationsResponse = ( output: any, context: __SerdeContext @@ -2768,6 +3432,15 @@ const deserializeAws_json1_0GetEC2RecommendationProjectedMetricsResponse = ( } as any; }; +const deserializeAws_json1_0GetEffectiveRecommendationPreferencesResponse = ( + output: any, + context: __SerdeContext +): GetEffectiveRecommendationPreferencesResponse => { + return { + enhancedInfrastructureMetrics: __expectString(output.enhancedInfrastructureMetrics), + } as any; +}; + const deserializeAws_json1_0GetEnrollmentStatusesForOrganizationResponse = ( output: any, context: __SerdeContext @@ -2832,6 +3505,19 @@ const deserializeAws_json1_0GetRecommendationErrors = ( }); }; +const deserializeAws_json1_0GetRecommendationPreferencesResponse = ( + output: any, + context: __SerdeContext +): GetRecommendationPreferencesResponse => { + return { + nextToken: __expectString(output.nextToken), + recommendationPreferencesDetails: + output.recommendationPreferencesDetails !== undefined && output.recommendationPreferencesDetails !== null + ? 
deserializeAws_json1_0RecommendationPreferencesDetails(output.recommendationPreferencesDetails, context) + : undefined, + } as any; +}; + const deserializeAws_json1_0GetRecommendationSummariesResponse = ( output: any, context: __SerdeContext @@ -2849,6 +3535,11 @@ const deserializeAws_json1_0InstanceRecommendation = (output: any, context: __Se return { accountId: __expectString(output.accountId), currentInstanceType: __expectString(output.currentInstanceType), + currentPerformanceRisk: __expectString(output.currentPerformanceRisk), + effectiveRecommendationPreferences: + output.effectiveRecommendationPreferences !== undefined && output.effectiveRecommendationPreferences !== null + ? deserializeAws_json1_0EffectiveRecommendationPreferences(output.effectiveRecommendationPreferences, context) + : undefined, finding: __expectString(output.finding), findingReasonCodes: output.findingReasonCodes !== undefined && output.findingReasonCodes !== null @@ -2906,6 +3597,10 @@ const deserializeAws_json1_0InstanceRecommendationOption = ( ? deserializeAws_json1_0ProjectedUtilizationMetrics(output.projectedUtilizationMetrics, context) : undefined, rank: __expectInt32(output.rank), + savingsOpportunity: + output.savingsOpportunity !== undefined && output.savingsOpportunity !== null + ? deserializeAws_json1_0SavingsOpportunity(output.savingsOpportunity, context) + : undefined, } as any; }; @@ -2977,6 +3672,10 @@ const deserializeAws_json1_0LambdaFunctionMemoryRecommendationOption = ( ? deserializeAws_json1_0LambdaFunctionMemoryProjectedMetrics(output.projectedUtilizationMetrics, context) : undefined, rank: __expectInt32(output.rank), + savingsOpportunity: + output.savingsOpportunity !== undefined && output.savingsOpportunity !== null + ? deserializeAws_json1_0SavingsOpportunity(output.savingsOpportunity, context) + : undefined, } as any; }; @@ -3001,6 +3700,7 @@ const deserializeAws_json1_0LambdaFunctionRecommendation = ( return { accountId: __expectString(output.accountId), currentMemorySize: __expectInt32(output.currentMemorySize), + currentPerformanceRisk: __expectString(output.currentPerformanceRisk), finding: __expectString(output.finding), findingReasonCodes: output.findingReasonCodes !== undefined && output.findingReasonCodes !== null @@ -3166,6 +3866,13 @@ const deserializeAws_json1_0ProjectedUtilizationMetrics = ( }); }; +const deserializeAws_json1_0PutRecommendationPreferencesResponse = ( + output: any, + context: __SerdeContext +): PutRecommendationPreferencesResponse => { + return {} as any; +}; + const deserializeAws_json1_0ReasonCodeSummaries = (output: any, context: __SerdeContext): ReasonCodeSummary[] => { return (output || []) .filter((e: any) => e != null) @@ -3236,6 +3943,34 @@ const deserializeAws_json1_0RecommendationOptions = ( }); }; +const deserializeAws_json1_0RecommendationPreferencesDetail = ( + output: any, + context: __SerdeContext +): RecommendationPreferencesDetail => { + return { + enhancedInfrastructureMetrics: __expectString(output.enhancedInfrastructureMetrics), + resourceType: __expectString(output.resourceType), + scope: + output.scope !== undefined && output.scope !== null + ? 
deserializeAws_json1_0Scope(output.scope, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_0RecommendationPreferencesDetails = ( + output: any, + context: __SerdeContext +): RecommendationPreferencesDetail[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_0RecommendationPreferencesDetail(entry, context); + }); +}; + const deserializeAws_json1_0RecommendationSource = (output: any, context: __SerdeContext): RecommendationSource => { return { recommendationSourceArn: __expectString(output.recommendationSourceArn), @@ -3271,7 +4006,15 @@ const deserializeAws_json1_0RecommendationSummaries = ( const deserializeAws_json1_0RecommendationSummary = (output: any, context: __SerdeContext): RecommendationSummary => { return { accountId: __expectString(output.accountId), + currentPerformanceRiskRatings: + output.currentPerformanceRiskRatings !== undefined && output.currentPerformanceRiskRatings !== null + ? deserializeAws_json1_0CurrentPerformanceRiskRatings(output.currentPerformanceRiskRatings, context) + : undefined, recommendationResourceType: __expectString(output.recommendationResourceType), + savingsOpportunity: + output.savingsOpportunity !== undefined && output.savingsOpportunity !== null + ? deserializeAws_json1_0SavingsOpportunity(output.savingsOpportunity, context) + : undefined, summaries: output.summaries !== undefined && output.summaries !== null ? deserializeAws_json1_0Summaries(output.summaries, context) @@ -3324,6 +4067,23 @@ const deserializeAws_json1_0S3Destination = (output: any, context: __SerdeContex } as any; }; +const deserializeAws_json1_0SavingsOpportunity = (output: any, context: __SerdeContext): SavingsOpportunity => { + return { + estimatedMonthlySavings: + output.estimatedMonthlySavings !== undefined && output.estimatedMonthlySavings !== null + ? deserializeAws_json1_0EstimatedMonthlySavings(output.estimatedMonthlySavings, context) + : undefined, + savingsOpportunityPercentage: __limitedParseDouble(output.savingsOpportunityPercentage), + } as any; +}; + +const deserializeAws_json1_0Scope = (output: any, context: __SerdeContext): Scope => { + return { + name: __expectString(output.name), + value: __expectString(output.value), + } as any; +}; + const deserializeAws_json1_0ServiceUnavailableException = ( output: any, context: __SerdeContext @@ -3419,6 +4179,7 @@ const deserializeAws_json1_0VolumeRecommendation = (output: any, context: __Serd output.currentConfiguration !== undefined && output.currentConfiguration !== null ? deserializeAws_json1_0VolumeConfiguration(output.currentConfiguration, context) : undefined, + currentPerformanceRisk: __expectString(output.currentPerformanceRisk), finding: __expectString(output.finding), lastRefreshTimestamp: output.lastRefreshTimestamp !== undefined && output.lastRefreshTimestamp !== null @@ -3448,6 +4209,10 @@ const deserializeAws_json1_0VolumeRecommendationOption = ( : undefined, performanceRisk: __limitedParseDouble(output.performanceRisk), rank: __expectInt32(output.rank), + savingsOpportunity: + output.savingsOpportunity !== undefined && output.savingsOpportunity !== null + ? 
deserializeAws_json1_0SavingsOpportunity(output.savingsOpportunity, context) + : undefined, } as any; }; diff --git a/clients/client-dataexchange/src/DataExchange.ts b/clients/client-dataexchange/src/DataExchange.ts index 730a17b86e73..5a2dbdddc811 100644 --- a/clients/client-dataexchange/src/DataExchange.ts +++ b/clients/client-dataexchange/src/DataExchange.ts @@ -68,6 +68,11 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; +import { + SendApiAssetCommand, + SendApiAssetCommandInput, + SendApiAssetCommandOutput, +} from "./commands/SendApiAssetCommand"; import { StartJobCommand, StartJobCommandInput, StartJobCommandOutput } from "./commands/StartJobCommand"; import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { @@ -686,6 +691,35 @@ export class DataExchange extends DataExchangeClient { } } + /** + *
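
A minimal usage sketch for the Compute Optimizer recommendation-preferences operations whose serializers and deserializers appear above (PutRecommendationPreferences, GetEffectiveRecommendationPreferences), and for the error names those deserializers attach. The region, account ID, ARN, and the literal enum strings ("Ec2Instance", "AccountId", "Active") are illustrative assumptions, not taken from this patch.

```javascript
// Sketch: exercising the new recommendation-preferences commands.
import {
  ComputeOptimizerClient,
  PutRecommendationPreferencesCommand,
  GetEffectiveRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

// Opt an account's EC2 instance recommendations into enhanced infrastructure metrics.
await client.send(
  new PutRecommendationPreferencesCommand({
    resourceType: "Ec2Instance",
    scope: { name: "AccountId", value: "123456789012" },
    enhancedInfrastructureMetrics: "Active",
  })
);

try {
  // Read back the preferences that are effective for one resource.
  const effective = await client.send(
    new GetEffectiveRecommendationPreferencesCommand({
      resourceArn: "arn:aws:ec2:us-east-1:123456789012:instance/i-0123456789abcdef0",
    })
  );
  console.log(effective.enhancedInfrastructureMetrics);
} catch (err) {
  // The deserializers above copy the modeled error code onto the rejected Error,
  // so callers can branch on err.name (e.g. "ResourceNotFoundException").
  if (err.name === "ResourceNotFoundException") {
    console.warn("No effective preferences for this resource:", err.message);
  } else {
    throw err;
  }
}
```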

+   * <p>This operation invokes an API Gateway API asset. The request is proxied to the provider’s API Gateway API.</p>
                                  + */ + public sendApiAsset( + args: SendApiAssetCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public sendApiAsset(args: SendApiAssetCommandInput, cb: (err: any, data?: SendApiAssetCommandOutput) => void): void; + public sendApiAsset( + args: SendApiAssetCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: SendApiAssetCommandOutput) => void + ): void; + public sendApiAsset( + args: SendApiAssetCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: SendApiAssetCommandOutput) => void), + cb?: (err: any, data?: SendApiAssetCommandOutput) => void + ): Promise | void { + const command = new SendApiAssetCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *
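
The aggregated DataExchange client gains a sendApiAsset method with the usual promise/callback overloads shown above. A brief sketch of both calling styles; the data set, revision, and asset IDs and the request body are placeholders.

```javascript
// Sketch: calling the new sendApiAsset convenience method in both styles.
import { DataExchange } from "@aws-sdk/client-dataexchange";

const dataExchange = new DataExchange({ region: "us-east-1" });

const params = {
  DataSetId: "data-set-id",
  RevisionId: "revision-id",
  AssetId: "asset-id",
  Method: "POST",
  Path: "/",
  Body: JSON.stringify({ ping: true }),
};

// Promise style: with no callback, the overload returns a Promise.
const response = await dataExchange.sendApiAsset(params);
console.log(response.Body);

// Callback style: passing a function as the second argument selects the
// callback overload, and nothing is returned.
dataExchange.sendApiAsset(params, (err, data) => {
  if (err) console.error(err);
  else console.log(data.Body);
});
```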

    * <p>This operation starts a job.</p>
                                  */ diff --git a/clients/client-dataexchange/src/DataExchangeClient.ts b/clients/client-dataexchange/src/DataExchangeClient.ts index bb8338c3b107..4ecbf0318ecc 100644 --- a/clients/client-dataexchange/src/DataExchangeClient.ts +++ b/clients/client-dataexchange/src/DataExchangeClient.ts @@ -75,6 +75,7 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; +import { SendApiAssetCommandInput, SendApiAssetCommandOutput } from "./commands/SendApiAssetCommand"; import { StartJobCommandInput, StartJobCommandOutput } from "./commands/StartJobCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; @@ -105,6 +106,7 @@ export type ServiceInputTypes = | ListJobsCommandInput | ListRevisionAssetsCommandInput | ListTagsForResourceCommandInput + | SendApiAssetCommandInput | StartJobCommandInput | TagResourceCommandInput | UntagResourceCommandInput @@ -134,6 +136,7 @@ export type ServiceOutputTypes = | ListJobsCommandOutput | ListRevisionAssetsCommandOutput | ListTagsForResourceCommandOutput + | SendApiAssetCommandOutput | StartJobCommandOutput | TagResourceCommandOutput | UntagResourceCommandOutput diff --git a/clients/client-dataexchange/src/commands/SendApiAssetCommand.ts b/clients/client-dataexchange/src/commands/SendApiAssetCommand.ts new file mode 100644 index 000000000000..9c40f6c460c5 --- /dev/null +++ b/clients/client-dataexchange/src/commands/SendApiAssetCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DataExchangeClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../DataExchangeClient"; +import { SendApiAssetRequest, SendApiAssetResponse } from "../models/models_0"; +import { + deserializeAws_restJson1SendApiAssetCommand, + serializeAws_restJson1SendApiAssetCommand, +} from "../protocols/Aws_restJson1"; + +export interface SendApiAssetCommandInput extends SendApiAssetRequest {} +export interface SendApiAssetCommandOutput extends SendApiAssetResponse, __MetadataBearer {} + +/** + *
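
Like every generated command output, SendApiAssetCommandOutput extends __MetadataBearer, so the deserializers attach a $metadata block (HTTP status code, request IDs, retry counts) next to the modeled fields. A small sketch of reading it; the IDs are placeholders.

```javascript
// Sketch: modeled fields and protocol metadata side by side on the output.
import { DataExchangeClient, SendApiAssetCommand } from "@aws-sdk/client-dataexchange";

const client = new DataExchangeClient({ region: "us-east-1" });
const output = await client.send(
  new SendApiAssetCommand({
    DataSetId: "data-set-id",
    RevisionId: "revision-id",
    AssetId: "asset-id",
  })
);

console.log(output.Body);
console.log(output.$metadata.httpStatusCode, output.$metadata.requestId);
```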

+ * <p>This operation invokes an API Gateway API asset. The request is proxied to the provider’s API Gateway API.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { DataExchangeClient, SendApiAssetCommand } from "@aws-sdk/client-dataexchange"; // ES Modules import + * // const { DataExchangeClient, SendApiAssetCommand } = require("@aws-sdk/client-dataexchange"); // CommonJS import + * const client = new DataExchangeClient(config); + * const command = new SendApiAssetCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link SendApiAssetCommandInput} for command's `input` shape. + * @see {@link SendApiAssetCommandOutput} for command's `response` shape. + * @see {@link DataExchangeClientResolvedConfig | config} for DataExchangeClient's `config` shape. + * + */ +export class SendApiAssetCommand extends $Command< + SendApiAssetCommandInput, + SendApiAssetCommandOutput, + DataExchangeClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: SendApiAssetCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: DataExchangeClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "DataExchangeClient"; + const commandName = "SendApiAssetCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: SendApiAssetRequest.filterSensitiveLog, + outputFilterSensitiveLog: SendApiAssetResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: SendApiAssetCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1SendApiAssetCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1SendApiAssetCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-dataexchange/src/commands/index.ts b/clients/client-dataexchange/src/commands/index.ts index a8ddcb28d92b..2043b24aa8ad 100644 --- a/clients/client-dataexchange/src/commands/index.ts +++ b/clients/client-dataexchange/src/commands/index.ts @@ -18,6 +18,7 @@ export * from "./ListEventActionsCommand"; export * from "./ListJobsCommand"; export * from "./ListRevisionAssetsCommand"; export * from "./ListTagsForResourceCommand"; +export * from "./SendApiAssetCommand"; export * from "./StartJobCommand"; export * from "./TagResourceCommand"; export * from "./UntagResourceCommand"; diff --git a/clients/client-dataexchange/src/models/models_0.ts b/clients/client-dataexchange/src/models/models_0.ts index 72eebae8786e..23960c486fe8 100644 --- a/clients/client-dataexchange/src/models/models_0.ts +++ b/clients/client-dataexchange/src/models/models_0.ts @@ -117,6 +117,69 @@ export namespace Action { }); } +export enum ProtocolType { + REST = "REST", +} + +/** + *

+ * <p>The API Gateway API that is the asset.</p>
+ */
+export interface ApiGatewayApiAsset {
+  /**
+   * <p>The API description of the API asset.</p>
+   */
+  ApiDescription?: string;
+
+  /**
+   * <p>The API endpoint of the API asset.</p>
+   */
+  ApiEndpoint?: string;
+
+  /**
+   * <p>The unique identifier of the API asset.</p>
+   */
+  ApiId?: string;
+
+  /**
+   * <p>The API key of the API asset.</p>
+   */
+  ApiKey?: string;
+
+  /**
+   * <p>The API name of the API asset.</p>
+   */
+  ApiName?: string;
+
+  /**
+   * <p>The download URL of the API specification of the API asset.</p>
+   */
+  ApiSpecificationDownloadUrl?: string;
+
+  /**
+   * <p>The date and time that the upload URL expires, in ISO 8601 format.</p>
+   */
+  ApiSpecificationDownloadUrlExpiresAt?: Date;
+
+  /**
+   * <p>The protocol type of the API asset.</p>
+   */
+  ProtocolType?: ProtocolType | string;
+
+  /**
+   * <p>The stage of the API asset.</p>
+   */
+  Stage?: string;
+}
+
+export namespace ApiGatewayApiAsset {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: ApiGatewayApiAsset): any => ({
+    ...obj,
+  });
+}
+
 /**
  * <p>The destination for the asset.</p>
                                  */ @@ -197,6 +260,11 @@ export interface AssetDetails { *

    * <p>The Amazon Redshift datashare that is the asset.</p>
                                  */ RedshiftDataShareAsset?: RedshiftDataShareAsset; + + /** + *

+   * <p>Information about the API Gateway API asset.</p>
                                  + */ + ApiGatewayApiAsset?: ApiGatewayApiAsset; } export namespace AssetDetails { @@ -209,12 +277,13 @@ export namespace AssetDetails { } export enum AssetType { + API_GATEWAY_API = "API_GATEWAY_API", REDSHIFT_DATA_SHARE = "REDSHIFT_DATA_SHARE", S3_SNAPSHOT = "S3_SNAPSHOT", } /** - *
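
With API_GATEWAY_API added to AssetType and ApiGatewayApiAsset added to AssetDetails, asset reads can now carry API Gateway metadata. A sketch of inspecting it from a GetAsset call; the IDs are placeholders, and the optional-member checks mirror the shapes above.

```javascript
// Sketch: reading API Gateway details off an asset.
import { DataExchangeClient, GetAssetCommand } from "@aws-sdk/client-dataexchange";

const client = new DataExchangeClient({ region: "us-east-1" });
const asset = await client.send(
  new GetAssetCommand({
    DataSetId: "data-set-id",
    RevisionId: "revision-id",
    AssetId: "asset-id",
  })
);

if (asset.AssetType === "API_GATEWAY_API" && asset.AssetDetails?.ApiGatewayApiAsset) {
  const api = asset.AssetDetails.ApiGatewayApiAsset;
  // Stage, endpoint, and the pre-signed OpenAPI download URL come straight
  // from the new ApiGatewayApiAsset shape.
  console.log(api.ApiId, api.Stage, api.ApiEndpoint);
  console.log(api.ApiSpecificationDownloadUrl, api.ApiSpecificationDownloadUrlExpiresAt);
}
```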

- * <p>An asset in AWS Data Exchange is a piece of data. The asset can be a structured data file, an image file, or some other data file that can be stored as an S3 object, or an Amazon Redshift datashare (Preview). When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.</p>
+ * <p>An asset in AWS Data Exchange is a piece of data (S3 object) or a means of fulfilling data (Amazon Redshift datashare or Amazon API Gateway API). The asset can be a structured data file, an image file, or some other data file that can be stored as an S3 object, an Amazon API Gateway API, or an Amazon Redshift datashare (Preview). When you create an import job for your files, API Gateway APIs, or Amazon Redshift datashares, you create an asset in AWS Data Exchange.</p>
                                  */ export interface AssetEntry { /** @@ -248,7 +317,7 @@ export interface AssetEntry { Id: string | undefined; /** - *

-   * <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.</p>
+   * <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.</p>
                                  */ Name: string | undefined; @@ -595,6 +664,7 @@ export namespace CreateDataSetResponse { } export enum LimitName { + Amazon_API_Gateway_API_assets_per_revision = "Amazon API Gateway API assets per revision", Amazon_Redshift_datashare_assets_per_import_job_from_Redshift = "Amazon Redshift datashare assets per import job from Redshift", Amazon_Redshift_datashare_assets_per_revision = "Amazon Redshift datashare assets per revision", Asset_per_export_job_from_Amazon_S3 = "Asset per export job from Amazon S3", @@ -608,10 +678,12 @@ export enum LimitName { Concurrent_in_progress_jobs_to_import_assets_from_Amazon_Redshift_datashares = "Concurrent in progress jobs to import assets from Amazon Redshift datashares", Concurrent_in_progress_jobs_to_import_assets_from_Amazon_S3 = "Concurrent in progress jobs to import assets from Amazon S3", Concurrent_in_progress_jobs_to_import_assets_from_a_signed_URL = "Concurrent in progress jobs to import assets from a signed URL", + Concurrent_in_progress_jobs_to_import_assets_from_an_API_Gateway_API = "Concurrent in progress jobs to import assets from an API Gateway API", Data_sets_per_account = "Data sets per account", Data_sets_per_product = "Data sets per product", Event_actions_per_account = "Event actions per account", Products_per_account = "Products per account", + Revisions_per_Amazon_API_Gateway_API_data_set = "Revisions per Amazon API Gateway API data set", Revisions_per_Amazon_Redshift_datashare_data_set = "Revisions per Amazon Redshift datashare data set", Revisions_per_data_set = "Revisions per data set", } @@ -871,6 +943,65 @@ export namespace ExportRevisionsToS3RequestDetails { }); } +/** + *

+ * <p>The request details.</p>
                                  + */ +export interface ImportAssetFromApiGatewayApiRequestDetails { + /** + *

+   * <p>The API description. Markdown supported.</p>
+   */
+  ApiDescription?: string;
+
+  /**
+   * <p>The API Gateway API ID.</p>
+   */
+  ApiId: string | undefined;
+
+  /**
+   * <p>The API Gateway API key.</p>
+   */
+  ApiKey?: string;
+
+  /**
+   * <p>The API name.</p>
+   */
+  ApiName: string | undefined;
+
+  /**
+   * <p>The Base64-encoded MD5 hash of the OpenAPI 3.0 JSON API specification file. It is used to ensure the integrity of the file.</p>
+   */
+  ApiSpecificationMd5Hash: string | undefined;
+
+  /**
+   * <p>The data set ID.</p>
+   */
+  DataSetId: string | undefined;
+
+  /**
+   * <p>The protocol type.</p>
+   */
+  ProtocolType: ProtocolType | string | undefined;
+
+  /**
+   * <p>The revision ID.</p>
+   */
+  RevisionId: string | undefined;
+
+  /**
+   * <p>The API stage.</p>
                                  + */ + Stage: string | undefined; +} + +export namespace ImportAssetFromApiGatewayApiRequestDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImportAssetFromApiGatewayApiRequestDetails): any => ({ + ...obj, + }); +} + /** *
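
The new request-details shape plugs into the existing CreateJob flow alongside the IMPORT_ASSET_FROM_API_GATEWAY_API job type added below. A sketch of building such a job, assuming the OpenAPI 3.0 JSON specification is available locally; the IDs, API name, and stage are placeholders, and the hash follows the ApiSpecificationMd5Hash description above.

```javascript
// Sketch: creating an IMPORT_ASSET_FROM_API_GATEWAY_API job.
import { createHash } from "crypto";
import { readFileSync } from "fs";
import { DataExchangeClient, CreateJobCommand } from "@aws-sdk/client-dataexchange";

const client = new DataExchangeClient({ region: "us-east-1" });
const spec = readFileSync("./openapi.json");

const job = await client.send(
  new CreateJobCommand({
    Type: "IMPORT_ASSET_FROM_API_GATEWAY_API",
    Details: {
      ImportAssetFromApiGatewayApi: {
        DataSetId: "data-set-id",
        RevisionId: "revision-id",
        ApiId: "abcde12345",
        ApiName: "Example API",
        Stage: "prod",
        ProtocolType: "REST",
        // Base64-encoded MD5 of the OpenAPI 3.0 JSON specification file.
        ApiSpecificationMd5Hash: createHash("md5").update(spec).digest("base64"),
      },
    },
  })
);

console.log(job.Id, job.Details?.ImportAssetFromApiGatewayApi?.ApiSpecificationUploadUrl);
```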

  * <p>Details of the operation to be performed by the job.</p>
                                  */ @@ -1015,6 +1146,11 @@ export interface RequestDetails { *

    * <p>Details from an import from Amazon Redshift datashare request.</p>
                                  */ ImportAssetsFromRedshiftDataShares?: ImportAssetsFromRedshiftDataSharesRequestDetails; + + /** + *

+   * <p>Information about the import asset from API Gateway API request.</p>
                                  + */ + ImportAssetFromApiGatewayApi?: ImportAssetFromApiGatewayApiRequestDetails; } export namespace RequestDetails { @@ -1032,6 +1168,7 @@ export enum Type { EXPORT_REVISIONS_TO_S3 = "EXPORT_REVISIONS_TO_S3", IMPORT_ASSETS_FROM_REDSHIFT_DATA_SHARES = "IMPORT_ASSETS_FROM_REDSHIFT_DATA_SHARES", IMPORT_ASSETS_FROM_S3 = "IMPORT_ASSETS_FROM_S3", + IMPORT_ASSET_FROM_API_GATEWAY_API = "IMPORT_ASSET_FROM_API_GATEWAY_API", IMPORT_ASSET_FROM_SIGNED_URL = "IMPORT_ASSET_FROM_SIGNED_URL", } @@ -1166,6 +1303,75 @@ export namespace ExportRevisionsToS3ResponseDetails { }); } +/** + *

+ * <p>The response details.</p>
                                  + */ +export interface ImportAssetFromApiGatewayApiResponseDetails { + /** + *

+   * <p>The API description.</p>
+   */
+  ApiDescription?: string;
+
+  /**
+   * <p>The API ID.</p>
+   */
+  ApiId: string | undefined;
+
+  /**
+   * <p>The API key.</p>
+   */
+  ApiKey?: string;
+
+  /**
+   * <p>The API name.</p>
+   */
+  ApiName: string | undefined;
+
+  /**
+   * <p>The Base64-encoded Md5 hash for the API asset, used to ensure the integrity of the API at that location.</p>
+   */
+  ApiSpecificationMd5Hash: string | undefined;
+
+  /**
+   * <p>The upload URL of the API specification.</p>
+   */
+  ApiSpecificationUploadUrl: string | undefined;
+
+  /**
+   * <p>The date and time that the upload URL expires, in ISO 8601 format.</p>
+   */
+  ApiSpecificationUploadUrlExpiresAt: Date | undefined;
+
+  /**
+   * <p>The data set ID.</p>
+   */
+  DataSetId: string | undefined;
+
+  /**
+   * <p>The protocol type.</p>
+   */
+  ProtocolType: ProtocolType | string | undefined;
+
+  /**
+   * <p>The revision ID.</p>
+   */
+  RevisionId: string | undefined;
+
+  /**
+   * <p>The API stage.</p>
                                  + */ + Stage: string | undefined; +} + +export namespace ImportAssetFromApiGatewayApiResponseDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImportAssetFromApiGatewayApiResponseDetails): any => ({ + ...obj, + }); +} + /** *
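
On the response side, the job hands back a pre-signed ApiSpecificationUploadUrl and its expiry. A sketch of the follow-up steps, assuming the flow mirrors the existing signed-URL import jobs: upload the spec to that URL before it expires, then start the job. It uses the global fetch available in Node 18+ and reuses the job output from the previous sketch.

```javascript
// Sketch: finishing an API Gateway import job.
import { readFileSync } from "fs";
import { DataExchangeClient, StartJobCommand } from "@aws-sdk/client-dataexchange";

const client = new DataExchangeClient({ region: "us-east-1" });

// `job` is the CreateJob output from the previous sketch.
async function completeApiGatewayImport(job) {
  const details = job.Details?.ImportAssetFromApiGatewayApi;
  if (!details) return;

  // Upload the OpenAPI 3.0 JSON spec to the pre-signed URL before
  // ApiSpecificationUploadUrlExpiresAt passes.
  await fetch(details.ApiSpecificationUploadUrl, {
    method: "PUT",
    body: readFileSync("./openapi.json"),
  });

  // Then start the job so the API is ingested as a revision asset.
  await client.send(new StartJobCommand({ JobId: job.Id }));
}
```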

  * <p>The details in the response for an import request, including the signed URL and other information.</p>
                                  */ @@ -1301,6 +1507,11 @@ export interface ResponseDetails { *

    * <p>Details from an import from Amazon Redshift datashare response.</p>
                                  */ ImportAssetsFromRedshiftDataShares?: ImportAssetsFromRedshiftDataSharesResponseDetails; + + /** + *

+   * <p>The response details.</p>
                                  + */ + ImportAssetFromApiGatewayApi?: ImportAssetFromApiGatewayApiResponseDetails; } export namespace ResponseDetails { @@ -1698,7 +1909,7 @@ export interface GetAssetResponse { Id?: string; /** - *

-   * <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.</p>
+   * <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.</p>
                                  */ Name?: string; @@ -2503,6 +2714,81 @@ export namespace ListTagsForResourceResponse { }); } +/** + *

+ * <p>The request body for SendApiAsset.</p>
                                  + */ +export interface SendApiAssetRequest { + /** + *

+   * <p>The request body.</p>
+   */
+  Body?: string;
+
+  /**
+   * <p>Attach query string parameters to the end of the URI (for example, /v1/examplePath?exampleParam=exampleValue).</p>
+   */
+  QueryStringParameters?: { [key: string]: string };
+
+  /**
+   * <p>Asset ID value for the API request.</p>
+   */
+  AssetId: string | undefined;
+
+  /**
+   * <p>Data set ID value for the API request.</p>
+   */
+  DataSetId: string | undefined;
+
+  /**
+   * <p>Any header value prefixed with x-amzn-dataexchange-header- will have that stripped before sending the Asset API request. Use this when you want to override a header that AWS Data Exchange uses. Alternatively, you can use the header without a prefix to the HTTP request.</p>
+   */
+  RequestHeaders?: { [key: string]: string };
+
+  /**
+   * <p>HTTP method value for the API request. Alternatively, you can use the appropriate verb in your request.</p>
+   */
+  Method?: string;
+
+  /**
+   * <p>URI path value for the API request. Alternatively, you can set the URI path directly by invoking /v1/{pathValue}</p>
+   */
+  Path?: string;
+
+  /**
+   * <p>Revision ID value for the API request.</p>
                                  + */ + RevisionId: string | undefined; +} + +export namespace SendApiAssetRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SendApiAssetRequest): any => ({ + ...obj, + }); +} + +export interface SendApiAssetResponse { + /** + *

+   * <p>The response body from the underlying API tracked by the API asset.</p>
                                  + */ + Body?: string; + + /** + *

+   * <p>The response headers from the underlying API tracked by the API asset.</p>
                                  + */ + ResponseHeaders?: { [key: string]: string }; +} + +export namespace SendApiAssetResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SendApiAssetResponse): any => ({ + ...obj, + }); +} + export interface StartJobRequest { /** *
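
The SendApiAsset shapes above map almost one-to-one onto HTTP: Body becomes the payload, QueryStringParameters the query string, RequestHeaders the x-amzn-dataexchange-header-* headers, and the serializer further down adds the api-fulfill. host prefix and the /v1 base path. A sketch with placeholder IDs and values.

```javascript
// Sketch: invoking the provider's API through the asset, with query
// parameters and a custom header.
import { DataExchangeClient, SendApiAssetCommand } from "@aws-sdk/client-dataexchange";

const client = new DataExchangeClient({ region: "us-east-1" });

const output = await client.send(
  new SendApiAssetCommand({
    DataSetId: "data-set-id",
    RevisionId: "revision-id",
    AssetId: "asset-id",
    Method: "GET",
    Path: "/items",
    QueryStringParameters: { limit: "10" },
    // Serialized as x-amzn-dataexchange-header-x-custom-header; Data Exchange
    // strips the prefix before forwarding it to the provider's API.
    RequestHeaders: { "x-custom-header": "example" },
  })
);

// The proxied response comes back as a raw string body plus headers.
const payload = output.Body ? JSON.parse(output.Body) : undefined;
console.log(output.ResponseHeaders, payload);
```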

    * <p>The unique identifier for a job.</p>
                                  @@ -2590,7 +2876,7 @@ export interface UpdateAssetRequest { DataSetId: string | undefined; /** - *

-   * <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.</p>
+   * <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.</p>
                                  */ Name: string | undefined; @@ -2641,7 +2927,7 @@ export interface UpdateAssetResponse { Id?: string; /** - *

-   * <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.</p>
+   * <p>The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.</p>
                                  */ Name?: string; diff --git a/clients/client-dataexchange/src/protocols/Aws_restJson1.ts b/clients/client-dataexchange/src/protocols/Aws_restJson1.ts index cd39872e58f5..019d96f90342 100644 --- a/clients/client-dataexchange/src/protocols/Aws_restJson1.ts +++ b/clients/client-dataexchange/src/protocols/Aws_restJson1.ts @@ -1,4 +1,8 @@ -import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { + HttpRequest as __HttpRequest, + HttpResponse as __HttpResponse, + isValidHostname as __isValidHostname, +} from "@aws-sdk/protocol-http"; import { expectBoolean as __expectBoolean, expectNonNull as __expectNonNull, @@ -42,6 +46,7 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "../commands/ListTagsForResourceCommand"; +import { SendApiAssetCommandInput, SendApiAssetCommandOutput } from "../commands/SendApiAssetCommand"; import { StartJobCommandInput, StartJobCommandOutput } from "../commands/StartJobCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; @@ -52,6 +57,7 @@ import { UpdateRevisionCommandInput, UpdateRevisionCommandOutput } from "../comm import { AccessDeniedException, Action, + ApiGatewayApiAsset, AssetDestinationEntry, AssetDetails, AssetEntry, @@ -70,6 +76,8 @@ import { ExportRevisionsToS3RequestDetails, ExportRevisionsToS3ResponseDetails, ExportServerSideEncryption, + ImportAssetFromApiGatewayApiRequestDetails, + ImportAssetFromApiGatewayApiResponseDetails, ImportAssetFromSignedUrlJobErrorDetails, ImportAssetFromSignedUrlRequestDetails, ImportAssetFromSignedUrlResponseDetails, @@ -740,6 +748,54 @@ export const serializeAws_restJson1ListTagsForResourceCommand = async ( }); }; +export const serializeAws_restJson1SendApiAssetCommand = async ( + input: SendApiAssetCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "text/plain", + ...(isSerializableHeaderValue(input.AssetId) && { "x-amzn-dataexchange-asset-id": input.AssetId! }), + ...(isSerializableHeaderValue(input.DataSetId) && { "x-amzn-dataexchange-data-set-id": input.DataSetId! }), + ...(isSerializableHeaderValue(input.Method) && { "x-amzn-dataexchange-http-method": input.Method! }), + ...(isSerializableHeaderValue(input.Path) && { "x-amzn-dataexchange-path": input.Path! }), + ...(isSerializableHeaderValue(input.RevisionId) && { "x-amzn-dataexchange-revision-id": input.RevisionId! }), + ...(input.RequestHeaders !== undefined && + Object.keys(input.RequestHeaders).reduce( + (acc: any, suffix: string) => ({ + ...acc, + [`x-amzn-dataexchange-header-${suffix.toLowerCase()}`]: input.RequestHeaders![suffix], + }), + {} + )), + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/v1"; + const query: any = { + ...(input.QueryStringParameters !== undefined && input.QueryStringParameters), + }; + let body: any; + if (input.Body !== undefined) { + body = input.Body; + } + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api-fulfill." 
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1StartJobCommand = async ( input: StartJobCommandInput, context: __SerdeContext @@ -2914,6 +2970,100 @@ const deserializeAws_restJson1ListTagsForResourceCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1SendApiAssetCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1SendApiAssetCommandError(output, context); + } + const contents: SendApiAssetCommandOutput = { + $metadata: deserializeMetadata(output), + Body: undefined, + ResponseHeaders: undefined, + }; + Object.keys(output.headers).forEach((header) => { + if (contents.ResponseHeaders === undefined) { + contents.ResponseHeaders = {}; + } + if (header.startsWith("")) { + contents.ResponseHeaders[header.substring(0)] = output.headers[header]; + } + }); + const data: any = await collectBodyString(output.body, context); + contents.Body = __expectString(data); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1SendApiAssetCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.dataexchange#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.dataexchange#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.dataexchange#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.dataexchange#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.dataexchange#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = 
response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1StartJobCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -3843,6 +3993,25 @@ const serializeAws_restJson1ExportServerSideEncryption = ( }; }; +const serializeAws_restJson1ImportAssetFromApiGatewayApiRequestDetails = ( + input: ImportAssetFromApiGatewayApiRequestDetails, + context: __SerdeContext +): any => { + return { + ...(input.ApiDescription !== undefined && + input.ApiDescription !== null && { ApiDescription: input.ApiDescription }), + ...(input.ApiId !== undefined && input.ApiId !== null && { ApiId: input.ApiId }), + ...(input.ApiKey !== undefined && input.ApiKey !== null && { ApiKey: input.ApiKey }), + ...(input.ApiName !== undefined && input.ApiName !== null && { ApiName: input.ApiName }), + ...(input.ApiSpecificationMd5Hash !== undefined && + input.ApiSpecificationMd5Hash !== null && { ApiSpecificationMd5Hash: input.ApiSpecificationMd5Hash }), + ...(input.DataSetId !== undefined && input.DataSetId !== null && { DataSetId: input.DataSetId }), + ...(input.ProtocolType !== undefined && input.ProtocolType !== null && { ProtocolType: input.ProtocolType }), + ...(input.RevisionId !== undefined && input.RevisionId !== null && { RevisionId: input.RevisionId }), + ...(input.Stage !== undefined && input.Stage !== null && { Stage: input.Stage }), + }; +}; + const serializeAws_restJson1ImportAssetFromSignedUrlRequestDetails = ( input: ImportAssetFromSignedUrlRequestDetails, context: __SerdeContext @@ -3977,6 +4146,13 @@ const serializeAws_restJson1RequestDetails = (input: RequestDetails, context: __ context ), }), + ...(input.ImportAssetFromApiGatewayApi !== undefined && + input.ImportAssetFromApiGatewayApi !== null && { + ImportAssetFromApiGatewayApi: serializeAws_restJson1ImportAssetFromApiGatewayApiRequestDetails( + input.ImportAssetFromApiGatewayApi, + context + ), + }), ...(input.ImportAssetFromSignedUrl !== undefined && input.ImportAssetFromSignedUrl !== null && { ImportAssetFromSignedUrl: serializeAws_restJson1ImportAssetFromSignedUrlRequestDetails( @@ -4024,6 +4200,23 @@ const deserializeAws_restJson1Action = (output: any, context: __SerdeContext): A } as any; }; +const deserializeAws_restJson1ApiGatewayApiAsset = (output: any, context: __SerdeContext): ApiGatewayApiAsset => { + return { + ApiDescription: __expectString(output.ApiDescription), + ApiEndpoint: __expectString(output.ApiEndpoint), + ApiId: __expectString(output.ApiId), + ApiKey: __expectString(output.ApiKey), + ApiName: __expectString(output.ApiName), + ApiSpecificationDownloadUrl: __expectString(output.ApiSpecificationDownloadUrl), + ApiSpecificationDownloadUrlExpiresAt: + output.ApiSpecificationDownloadUrlExpiresAt !== undefined && output.ApiSpecificationDownloadUrlExpiresAt !== null + ? 
__expectNonNull(__parseRfc3339DateTime(output.ApiSpecificationDownloadUrlExpiresAt)) + : undefined, + ProtocolType: __expectString(output.ProtocolType), + Stage: __expectString(output.Stage), + } as any; +}; + const deserializeAws_restJson1AssetDestinationEntry = (output: any, context: __SerdeContext): AssetDestinationEntry => { return { AssetId: __expectString(output.AssetId), @@ -4034,6 +4227,10 @@ const deserializeAws_restJson1AssetDestinationEntry = (output: any, context: __S const deserializeAws_restJson1AssetDetails = (output: any, context: __SerdeContext): AssetDetails => { return { + ApiGatewayApiAsset: + output.ApiGatewayApiAsset !== undefined && output.ApiGatewayApiAsset !== null + ? deserializeAws_restJson1ApiGatewayApiAsset(output.ApiGatewayApiAsset, context) + : undefined, RedshiftDataShareAsset: output.RedshiftDataShareAsset !== undefined && output.RedshiftDataShareAsset !== null ? deserializeAws_restJson1RedshiftDataShareAsset(output.RedshiftDataShareAsset, context) @@ -4237,6 +4434,28 @@ const deserializeAws_restJson1ExportServerSideEncryption = ( } as any; }; +const deserializeAws_restJson1ImportAssetFromApiGatewayApiResponseDetails = ( + output: any, + context: __SerdeContext +): ImportAssetFromApiGatewayApiResponseDetails => { + return { + ApiDescription: __expectString(output.ApiDescription), + ApiId: __expectString(output.ApiId), + ApiKey: __expectString(output.ApiKey), + ApiName: __expectString(output.ApiName), + ApiSpecificationMd5Hash: __expectString(output.ApiSpecificationMd5Hash), + ApiSpecificationUploadUrl: __expectString(output.ApiSpecificationUploadUrl), + ApiSpecificationUploadUrlExpiresAt: + output.ApiSpecificationUploadUrlExpiresAt !== undefined && output.ApiSpecificationUploadUrlExpiresAt !== null + ? __expectNonNull(__parseRfc3339DateTime(output.ApiSpecificationUploadUrlExpiresAt)) + : undefined, + DataSetId: __expectString(output.DataSetId), + ProtocolType: __expectString(output.ProtocolType), + RevisionId: __expectString(output.RevisionId), + Stage: __expectString(output.Stage), + } as any; +}; + const deserializeAws_restJson1ImportAssetFromSignedUrlJobErrorDetails = ( output: any, context: __SerdeContext @@ -4500,6 +4719,13 @@ const deserializeAws_restJson1ResponseDetails = (output: any, context: __SerdeCo output.ExportRevisionsToS3 !== undefined && output.ExportRevisionsToS3 !== null ? deserializeAws_restJson1ExportRevisionsToS3ResponseDetails(output.ExportRevisionsToS3, context) : undefined, + ImportAssetFromApiGatewayApi: + output.ImportAssetFromApiGatewayApi !== undefined && output.ImportAssetFromApiGatewayApi !== null + ? deserializeAws_restJson1ImportAssetFromApiGatewayApiResponseDetails( + output.ImportAssetFromApiGatewayApi, + context + ) + : undefined, ImportAssetFromSignedUrl: output.ImportAssetFromSignedUrl !== undefined && output.ImportAssetFromSignedUrl !== null ? 
deserializeAws_restJson1ImportAssetFromSignedUrlResponseDetails(output.ImportAssetFromSignedUrl, context) diff --git a/clients/client-ec2/src/EC2.ts b/clients/client-ec2/src/EC2.ts index 6bb94785b9a6..1688872d2527 100644 --- a/clients/client-ec2/src/EC2.ts +++ b/clients/client-ec2/src/EC2.ts @@ -1259,6 +1259,11 @@ import { DescribeSnapshotsCommandInput, DescribeSnapshotsCommandOutput, } from "./commands/DescribeSnapshotsCommand"; +import { + DescribeSnapshotTierStatusCommand, + DescribeSnapshotTierStatusCommandInput, + DescribeSnapshotTierStatusCommandOutput, +} from "./commands/DescribeSnapshotTierStatusCommand"; import { DescribeSpotDatafeedSubscriptionCommand, DescribeSpotDatafeedSubscriptionCommandInput, @@ -1801,6 +1806,11 @@ import { ImportVolumeCommandInput, ImportVolumeCommandOutput, } from "./commands/ImportVolumeCommand"; +import { + ListSnapshotsInRecycleBinCommand, + ListSnapshotsInRecycleBinCommandInput, + ListSnapshotsInRecycleBinCommandOutput, +} from "./commands/ListSnapshotsInRecycleBinCommand"; import { ModifyAddressAttributeCommand, ModifyAddressAttributeCommandInput, @@ -1928,6 +1938,11 @@ import { ModifySnapshotAttributeCommandInput, ModifySnapshotAttributeCommandOutput, } from "./commands/ModifySnapshotAttributeCommand"; +import { + ModifySnapshotTierCommand, + ModifySnapshotTierCommandInput, + ModifySnapshotTierCommandOutput, +} from "./commands/ModifySnapshotTierCommand"; import { ModifySpotFleetRequestCommand, ModifySpotFleetRequestCommandInput, @@ -2213,6 +2228,16 @@ import { RestoreManagedPrefixListVersionCommandInput, RestoreManagedPrefixListVersionCommandOutput, } from "./commands/RestoreManagedPrefixListVersionCommand"; +import { + RestoreSnapshotFromRecycleBinCommand, + RestoreSnapshotFromRecycleBinCommandInput, + RestoreSnapshotFromRecycleBinCommandOutput, +} from "./commands/RestoreSnapshotFromRecycleBinCommand"; +import { + RestoreSnapshotTierCommand, + RestoreSnapshotTierCommandInput, + RestoreSnapshotTierCommandOutput, +} from "./commands/RestoreSnapshotTierCommand"; import { RevokeClientVpnIngressCommand, RevokeClientVpnIngressCommandInput, @@ -11841,6 +11866,38 @@ export class EC2 extends EC2Client { } } + /** + *

                                  Describes the storage tier status of one or more Amazon EBS snapshots.
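For context, a hypothetical input for the new operation; the Filters shape and the snapshot-id filter name follow general EC2 conventions and are assumptions, as is the SnapshotTierStatuses result field.

```ts
import { EC2Client, DescribeSnapshotTierStatusCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" }); // region is illustrative

async function showTierStatus() {
  const response = await client.send(
    new DescribeSnapshotTierStatusCommand({
      // Assumed filter name; restricts the results to a single snapshot.
      Filters: [{ Name: "snapshot-id", Values: ["snap-0123456789abcdef0"] }],
    })
  );
  // The result is expected to carry a SnapshotTierStatuses list (assumption).
  console.log(response.SnapshotTierStatuses);
}

showTierStatus().catch(console.error);
```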

                                  + */ + public describeSnapshotTierStatus( + args: DescribeSnapshotTierStatusCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeSnapshotTierStatus( + args: DescribeSnapshotTierStatusCommandInput, + cb: (err: any, data?: DescribeSnapshotTierStatusCommandOutput) => void + ): void; + public describeSnapshotTierStatus( + args: DescribeSnapshotTierStatusCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeSnapshotTierStatusCommandOutput) => void + ): void; + public describeSnapshotTierStatus( + args: DescribeSnapshotTierStatusCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeSnapshotTierStatusCommandOutput) => void), + cb?: (err: any, data?: DescribeSnapshotTierStatusCommandOutput) => void + ): Promise | void { + const command = new DescribeSnapshotTierStatusCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Describes the data feed for Spot Instances. For more information, see Spot * Instance data feed in the Amazon EC2 User Guide for Linux Instances.

                                  @@ -15721,6 +15778,38 @@ export class EC2 extends EC2Client { } } + /** + *

                                  Lists one or more snapshots that are currently in the Recycle Bin.
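Because the operation is paginated, a sketch of walking all pages manually with NextToken; the Snapshots, MaxResults, and NextToken names are assumptions about the request and result shapes referenced here.

```ts
import { EC2Client, ListSnapshotsInRecycleBinCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function listAllRecycleBinSnapshots() {
  let NextToken: string | undefined;
  do {
    const page = await client.send(
      new ListSnapshotsInRecycleBinCommand({ MaxResults: 50, NextToken }) // MaxResults is assumed
    );
    for (const snapshot of page.Snapshots ?? []) {
      console.log(snapshot.SnapshotId); // snapshots currently in the Recycle Bin
    }
    NextToken = page.NextToken;
  } while (NextToken);
}

listAllRecycleBinSnapshots().catch(console.error);
```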

                                  + */ + public listSnapshotsInRecycleBin( + args: ListSnapshotsInRecycleBinCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listSnapshotsInRecycleBin( + args: ListSnapshotsInRecycleBinCommandInput, + cb: (err: any, data?: ListSnapshotsInRecycleBinCommandOutput) => void + ): void; + public listSnapshotsInRecycleBin( + args: ListSnapshotsInRecycleBinCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListSnapshotsInRecycleBinCommandOutput) => void + ): void; + public listSnapshotsInRecycleBin( + args: ListSnapshotsInRecycleBinCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListSnapshotsInRecycleBinCommandOutput) => void), + cb?: (err: any, data?: ListSnapshotsInRecycleBinCommandOutput) => void + ): Promise | void { + const command = new ListSnapshotsInRecycleBinCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Modifies an attribute of the specified Elastic IP address. For requirements, see Using reverse DNS for email applications.

                                  */ @@ -16755,6 +16844,42 @@ export class EC2 extends EC2Client { } } + /** + *

                                  Archives an Amazon EBS snapshot. When you archive a snapshot, it is converted to a full + * snapshot that includes all of the blocks of data that were written to the volume at the + * time the snapshot was created, and moved from the standard tier to the archive + * tier. For more information, see Archive Amazon EBS snapshots + * in the Amazon Elastic Compute Cloud User Guide.
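A minimal sketch of archiving a snapshot: the StorageTier value "archive" matches the StorageTier enum added to models_1.ts in this change, while the SnapshotId is a placeholder and the rest of the request shape is assumed.

```ts
import { EC2Client, ModifySnapshotTierCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function archiveSnapshot(snapshotId: string) {
  // Move the snapshot from the standard tier to the archive tier.
  const result = await client.send(
    new ModifySnapshotTierCommand({ SnapshotId: snapshotId, StorageTier: "archive" })
  );
  console.log(result);
}

archiveSnapshot("snap-0123456789abcdef0").catch(console.error);
```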

                                  + */ + public modifySnapshotTier( + args: ModifySnapshotTierCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public modifySnapshotTier( + args: ModifySnapshotTierCommandInput, + cb: (err: any, data?: ModifySnapshotTierCommandOutput) => void + ): void; + public modifySnapshotTier( + args: ModifySnapshotTierCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ModifySnapshotTierCommandOutput) => void + ): void; + public modifySnapshotTier( + args: ModifySnapshotTierCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ModifySnapshotTierCommandOutput) => void), + cb?: (err: any, data?: ModifySnapshotTierCommandOutput) => void + ): Promise | void { + const command = new ModifySnapshotTierCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Modifies the specified Spot Fleet request.

                                  *

                                  You can only modify a Spot Fleet request of type maintain.

                                  @@ -16813,6 +16938,36 @@ export class EC2 extends EC2Client { /** *

                                  Modifies a subnet attribute. You can only modify one attribute at a time.

                                  + * + *

                                  Use this action to modify subnets on Amazon Web Services Outposts.

                                  + *
                                    + *
                                  • + *

                                    To modify a subnet on an Outpost rack, set both + * MapCustomerOwnedIpOnLaunch and + * CustomerOwnedIpv4Pool. These two parameters act as a single + * attribute.

                                    + *
                                  • + *
                                  • + *

                                    To modify a subnet on an Outpost server, set either + * EnableLniAtDeviceIndex or + * DisableLniAtDeviceIndex.

                                    + *
                                  • + *
                                  + * + *

                                  For more information about Amazon Web Services Outposts, see the following:
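Putting the two cases above together, a sketch of what the calls might look like; the subnet and pool IDs are placeholders, and the AttributeBooleanValue wrapper for MapCustomerOwnedIpOnLaunch is an assumption carried over from the existing subnet attributes.

```ts
import { EC2Client, ModifySubnetAttributeCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function configureOutpostSubnets() {
  // Outpost rack: MapCustomerOwnedIpOnLaunch and CustomerOwnedIpv4Pool are set together.
  await client.send(
    new ModifySubnetAttributeCommand({
      SubnetId: "subnet-0123456789abcdef0",           // placeholder
      MapCustomerOwnedIpOnLaunch: { Value: true },    // AttributeBooleanValue shape (assumed)
      CustomerOwnedIpv4Pool: "ipv4pool-coip-EXAMPLE", // placeholder pool ID
    })
  );

  // Outpost server: enable local network interfaces at a device index
  // (DisableLniAtDeviceIndex is the counterpart for turning this off).
  await client.send(
    new ModifySubnetAttributeCommand({
      SubnetId: "subnet-0fedcba9876543210", // placeholder
      EnableLniAtDeviceIndex: 1,            // eth1, matching the new Subnet.EnableLniAtDeviceIndex field
    })
  );
}

configureOutpostSubnets().catch(console.error);
```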

                                  + * + * */ public modifySubnetAttribute( args: ModifySubnetAttributeCommandInput, @@ -18915,6 +19070,76 @@ export class EC2 extends EC2Client { } } + /** + *

                                  Restores a snapshot from the Recycle Bin. For more information, see Restore + * snapshots from the Recycle Bin in the Amazon Elastic Compute Cloud User Guide.
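A minimal sketch of the call; SnapshotId is assumed to be the only required field, and the ID shown is a placeholder.

```ts
import { EC2Client, RestoreSnapshotFromRecycleBinCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function restoreFromRecycleBin(snapshotId: string) {
  // Moves the snapshot out of the Recycle Bin so it can be used again.
  const result = await client.send(
    new RestoreSnapshotFromRecycleBinCommand({ SnapshotId: snapshotId })
  );
  console.log(result);
}

restoreFromRecycleBin("snap-0123456789abcdef0").catch(console.error);
```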

                                  + */ + public restoreSnapshotFromRecycleBin( + args: RestoreSnapshotFromRecycleBinCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public restoreSnapshotFromRecycleBin( + args: RestoreSnapshotFromRecycleBinCommandInput, + cb: (err: any, data?: RestoreSnapshotFromRecycleBinCommandOutput) => void + ): void; + public restoreSnapshotFromRecycleBin( + args: RestoreSnapshotFromRecycleBinCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: RestoreSnapshotFromRecycleBinCommandOutput) => void + ): void; + public restoreSnapshotFromRecycleBin( + args: RestoreSnapshotFromRecycleBinCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: RestoreSnapshotFromRecycleBinCommandOutput) => void), + cb?: (err: any, data?: RestoreSnapshotFromRecycleBinCommandOutput) => void + ): Promise | void { + const command = new RestoreSnapshotFromRecycleBinCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Restores an archived Amazon EBS snapshot for use temporarily or permanently, or modifies the restore + * period or restore type for a snapshot that was previously temporarily restored.

                                  + * + *

                                  For more information, see + * Restore an archived snapshot and + * modify the restore period or restore type for a temporarily restored snapshot in the Amazon Elastic Compute Cloud User Guide.
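A sketch of a temporary restore; the TemporaryRestoreDays and PermanentRestore parameter names are assumptions about the RestoreSnapshotTierRequest shape, which is not shown in this diff.

```ts
import { EC2Client, RestoreSnapshotTierCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({});

async function restoreArchivedSnapshot(snapshotId: string) {
  // Temporarily restore an archived snapshot for 10 days; a permanent restore would
  // presumably pass PermanentRestore: true instead (assumed parameter names).
  const result = await client.send(
    new RestoreSnapshotTierCommand({ SnapshotId: snapshotId, TemporaryRestoreDays: 10 })
  );
  console.log(result);
}

restoreArchivedSnapshot("snap-0123456789abcdef0").catch(console.error);
```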

                                  + */ + public restoreSnapshotTier( + args: RestoreSnapshotTierCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public restoreSnapshotTier( + args: RestoreSnapshotTierCommandInput, + cb: (err: any, data?: RestoreSnapshotTierCommandOutput) => void + ): void; + public restoreSnapshotTier( + args: RestoreSnapshotTierCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: RestoreSnapshotTierCommandOutput) => void + ): void; + public restoreSnapshotTier( + args: RestoreSnapshotTierCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: RestoreSnapshotTierCommandOutput) => void), + cb?: (err: any, data?: RestoreSnapshotTierCommandOutput) => void + ): Promise | void { + const command = new RestoreSnapshotTierCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Removes an ingress authorization rule from a Client VPN endpoint.

                                  */ diff --git a/clients/client-ec2/src/EC2Client.ts b/clients/client-ec2/src/EC2Client.ts index ece1e614a763..8a36dd17cf56 100644 --- a/clients/client-ec2/src/EC2Client.ts +++ b/clients/client-ec2/src/EC2Client.ts @@ -896,6 +896,10 @@ import { DescribeSnapshotAttributeCommandOutput, } from "./commands/DescribeSnapshotAttributeCommand"; import { DescribeSnapshotsCommandInput, DescribeSnapshotsCommandOutput } from "./commands/DescribeSnapshotsCommand"; +import { + DescribeSnapshotTierStatusCommandInput, + DescribeSnapshotTierStatusCommandOutput, +} from "./commands/DescribeSnapshotTierStatusCommand"; import { DescribeSpotDatafeedSubscriptionCommandInput, DescribeSpotDatafeedSubscriptionCommandOutput, @@ -1288,6 +1292,10 @@ import { ImportInstanceCommandInput, ImportInstanceCommandOutput } from "./comma import { ImportKeyPairCommandInput, ImportKeyPairCommandOutput } from "./commands/ImportKeyPairCommand"; import { ImportSnapshotCommandInput, ImportSnapshotCommandOutput } from "./commands/ImportSnapshotCommand"; import { ImportVolumeCommandInput, ImportVolumeCommandOutput } from "./commands/ImportVolumeCommand"; +import { + ListSnapshotsInRecycleBinCommandInput, + ListSnapshotsInRecycleBinCommandOutput, +} from "./commands/ListSnapshotsInRecycleBinCommand"; import { ModifyAddressAttributeCommandInput, ModifyAddressAttributeCommandOutput, @@ -1387,6 +1395,7 @@ import { ModifySnapshotAttributeCommandInput, ModifySnapshotAttributeCommandOutput, } from "./commands/ModifySnapshotAttributeCommand"; +import { ModifySnapshotTierCommandInput, ModifySnapshotTierCommandOutput } from "./commands/ModifySnapshotTierCommand"; import { ModifySpotFleetRequestCommandInput, ModifySpotFleetRequestCommandOutput, @@ -1576,6 +1585,14 @@ import { RestoreManagedPrefixListVersionCommandInput, RestoreManagedPrefixListVersionCommandOutput, } from "./commands/RestoreManagedPrefixListVersionCommand"; +import { + RestoreSnapshotFromRecycleBinCommandInput, + RestoreSnapshotFromRecycleBinCommandOutput, +} from "./commands/RestoreSnapshotFromRecycleBinCommand"; +import { + RestoreSnapshotTierCommandInput, + RestoreSnapshotTierCommandOutput, +} from "./commands/RestoreSnapshotTierCommand"; import { RevokeClientVpnIngressCommandInput, RevokeClientVpnIngressCommandOutput, @@ -1903,6 +1920,7 @@ export type ServiceInputTypes = | DescribeSecurityGroupRulesCommandInput | DescribeSecurityGroupsCommandInput | DescribeSnapshotAttributeCommandInput + | DescribeSnapshotTierStatusCommandInput | DescribeSnapshotsCommandInput | DescribeSpotDatafeedSubscriptionCommandInput | DescribeSpotFleetInstancesCommandInput @@ -2014,6 +2032,7 @@ export type ServiceInputTypes = | ImportKeyPairCommandInput | ImportSnapshotCommandInput | ImportVolumeCommandInput + | ListSnapshotsInRecycleBinCommandInput | ModifyAddressAttributeCommandInput | ModifyAvailabilityZoneGroupCommandInput | ModifyCapacityReservationCommandInput @@ -2041,6 +2060,7 @@ export type ServiceInputTypes = | ModifyReservedInstancesCommandInput | ModifySecurityGroupRulesCommandInput | ModifySnapshotAttributeCommandInput + | ModifySnapshotTierCommandInput | ModifySpotFleetRequestCommandInput | ModifySubnetAttributeCommandInput | ModifyTrafficMirrorFilterNetworkServicesCommandInput @@ -2098,6 +2118,8 @@ export type ServiceInputTypes = | ResetSnapshotAttributeCommandInput | RestoreAddressToClassicCommandInput | RestoreManagedPrefixListVersionCommandInput + | RestoreSnapshotFromRecycleBinCommandInput + | RestoreSnapshotTierCommandInput | 
RevokeClientVpnIngressCommandInput | RevokeSecurityGroupEgressCommandInput | RevokeSecurityGroupIngressCommandInput @@ -2379,6 +2401,7 @@ export type ServiceOutputTypes = | DescribeSecurityGroupRulesCommandOutput | DescribeSecurityGroupsCommandOutput | DescribeSnapshotAttributeCommandOutput + | DescribeSnapshotTierStatusCommandOutput | DescribeSnapshotsCommandOutput | DescribeSpotDatafeedSubscriptionCommandOutput | DescribeSpotFleetInstancesCommandOutput @@ -2490,6 +2513,7 @@ export type ServiceOutputTypes = | ImportKeyPairCommandOutput | ImportSnapshotCommandOutput | ImportVolumeCommandOutput + | ListSnapshotsInRecycleBinCommandOutput | ModifyAddressAttributeCommandOutput | ModifyAvailabilityZoneGroupCommandOutput | ModifyCapacityReservationCommandOutput @@ -2517,6 +2541,7 @@ export type ServiceOutputTypes = | ModifyReservedInstancesCommandOutput | ModifySecurityGroupRulesCommandOutput | ModifySnapshotAttributeCommandOutput + | ModifySnapshotTierCommandOutput | ModifySpotFleetRequestCommandOutput | ModifySubnetAttributeCommandOutput | ModifyTrafficMirrorFilterNetworkServicesCommandOutput @@ -2574,6 +2599,8 @@ export type ServiceOutputTypes = | ResetSnapshotAttributeCommandOutput | RestoreAddressToClassicCommandOutput | RestoreManagedPrefixListVersionCommandOutput + | RestoreSnapshotFromRecycleBinCommandOutput + | RestoreSnapshotTierCommandOutput | RevokeClientVpnIngressCommandOutput | RevokeSecurityGroupEgressCommandOutput | RevokeSecurityGroupIngressCommandOutput diff --git a/clients/client-ec2/src/commands/CreateVpcEndpointConnectionNotificationCommand.ts b/clients/client-ec2/src/commands/CreateVpcEndpointConnectionNotificationCommand.ts index 0e5ad4495406..143c564459f1 100644 --- a/clients/client-ec2/src/commands/CreateVpcEndpointConnectionNotificationCommand.ts +++ b/clients/client-ec2/src/commands/CreateVpcEndpointConnectionNotificationCommand.ts @@ -12,8 +12,10 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { CreateVpcEndpointConnectionNotificationRequest } from "../models/models_1"; -import { CreateVpcEndpointConnectionNotificationResult } from "../models/models_2"; +import { + CreateVpcEndpointConnectionNotificationRequest, + CreateVpcEndpointConnectionNotificationResult, +} from "../models/models_2"; import { deserializeAws_ec2CreateVpcEndpointConnectionNotificationCommand, serializeAws_ec2CreateVpcEndpointConnectionNotificationCommand, diff --git a/clients/client-ec2/src/commands/DescribeHostReservationsCommand.ts b/clients/client-ec2/src/commands/DescribeHostReservationsCommand.ts index 12e6fb3535a7..300233c77733 100644 --- a/clients/client-ec2/src/commands/DescribeHostReservationsCommand.ts +++ b/clients/client-ec2/src/commands/DescribeHostReservationsCommand.ts @@ -12,8 +12,7 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { DescribeHostReservationsRequest } from "../models/models_2"; -import { DescribeHostReservationsResult } from "../models/models_3"; +import { DescribeHostReservationsRequest, DescribeHostReservationsResult } from "../models/models_3"; import { deserializeAws_ec2DescribeHostReservationsCommand, serializeAws_ec2DescribeHostReservationsCommand, diff --git a/clients/client-ec2/src/commands/DescribeSnapshotTierStatusCommand.ts b/clients/client-ec2/src/commands/DescribeSnapshotTierStatusCommand.ts new file mode 100644 index 000000000000..be3f2e9c78bd --- /dev/null +++ 
b/clients/client-ec2/src/commands/DescribeSnapshotTierStatusCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; +import { DescribeSnapshotTierStatusRequest, DescribeSnapshotTierStatusResult } from "../models/models_3"; +import { + deserializeAws_ec2DescribeSnapshotTierStatusCommand, + serializeAws_ec2DescribeSnapshotTierStatusCommand, +} from "../protocols/Aws_ec2"; + +export interface DescribeSnapshotTierStatusCommandInput extends DescribeSnapshotTierStatusRequest {} +export interface DescribeSnapshotTierStatusCommandOutput extends DescribeSnapshotTierStatusResult, __MetadataBearer {} + +/** + *

                                  Describes the storage tier status of one or more Amazon EBS snapshots.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EC2Client, DescribeSnapshotTierStatusCommand } from "@aws-sdk/client-ec2"; // ES Modules import + * // const { EC2Client, DescribeSnapshotTierStatusCommand } = require("@aws-sdk/client-ec2"); // CommonJS import + * const client = new EC2Client(config); + * const command = new DescribeSnapshotTierStatusCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeSnapshotTierStatusCommandInput} for command's `input` shape. + * @see {@link DescribeSnapshotTierStatusCommandOutput} for command's `response` shape. + * @see {@link EC2ClientResolvedConfig | config} for EC2Client's `config` shape. + * + */ +export class DescribeSnapshotTierStatusCommand extends $Command< + DescribeSnapshotTierStatusCommandInput, + DescribeSnapshotTierStatusCommandOutput, + EC2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeSnapshotTierStatusCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EC2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EC2Client"; + const commandName = "DescribeSnapshotTierStatusCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeSnapshotTierStatusRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeSnapshotTierStatusResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeSnapshotTierStatusCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_ec2DescribeSnapshotTierStatusCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_ec2DescribeSnapshotTierStatusCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ec2/src/commands/DescribeSpotPriceHistoryCommand.ts b/clients/client-ec2/src/commands/DescribeSpotPriceHistoryCommand.ts index 6cd6115a710a..db69132c3e97 100644 --- a/clients/client-ec2/src/commands/DescribeSpotPriceHistoryCommand.ts +++ b/clients/client-ec2/src/commands/DescribeSpotPriceHistoryCommand.ts @@ -12,7 +12,8 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { DescribeSpotPriceHistoryRequest, DescribeSpotPriceHistoryResult } from "../models/models_3"; +import { DescribeSpotPriceHistoryRequest } from "../models/models_3"; +import { DescribeSpotPriceHistoryResult } from "../models/models_4"; import { deserializeAws_ec2DescribeSpotPriceHistoryCommand, serializeAws_ec2DescribeSpotPriceHistoryCommand, diff --git a/clients/client-ec2/src/commands/DescribeStaleSecurityGroupsCommand.ts 
b/clients/client-ec2/src/commands/DescribeStaleSecurityGroupsCommand.ts index 880010df3900..6a34368869f8 100644 --- a/clients/client-ec2/src/commands/DescribeStaleSecurityGroupsCommand.ts +++ b/clients/client-ec2/src/commands/DescribeStaleSecurityGroupsCommand.ts @@ -12,8 +12,7 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { DescribeStaleSecurityGroupsRequest } from "../models/models_3"; -import { DescribeStaleSecurityGroupsResult } from "../models/models_4"; +import { DescribeStaleSecurityGroupsRequest, DescribeStaleSecurityGroupsResult } from "../models/models_4"; import { deserializeAws_ec2DescribeStaleSecurityGroupsCommand, serializeAws_ec2DescribeStaleSecurityGroupsCommand, diff --git a/clients/client-ec2/src/commands/ListSnapshotsInRecycleBinCommand.ts b/clients/client-ec2/src/commands/ListSnapshotsInRecycleBinCommand.ts new file mode 100644 index 000000000000..3df608946acf --- /dev/null +++ b/clients/client-ec2/src/commands/ListSnapshotsInRecycleBinCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; +import { ListSnapshotsInRecycleBinRequest, ListSnapshotsInRecycleBinResult } from "../models/models_4"; +import { + deserializeAws_ec2ListSnapshotsInRecycleBinCommand, + serializeAws_ec2ListSnapshotsInRecycleBinCommand, +} from "../protocols/Aws_ec2"; + +export interface ListSnapshotsInRecycleBinCommandInput extends ListSnapshotsInRecycleBinRequest {} +export interface ListSnapshotsInRecycleBinCommandOutput extends ListSnapshotsInRecycleBinResult, __MetadataBearer {} + +/** + *

                                  Lists one or more snapshots that are currently in the Recycle Bin.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EC2Client, ListSnapshotsInRecycleBinCommand } from "@aws-sdk/client-ec2"; // ES Modules import + * // const { EC2Client, ListSnapshotsInRecycleBinCommand } = require("@aws-sdk/client-ec2"); // CommonJS import + * const client = new EC2Client(config); + * const command = new ListSnapshotsInRecycleBinCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListSnapshotsInRecycleBinCommandInput} for command's `input` shape. + * @see {@link ListSnapshotsInRecycleBinCommandOutput} for command's `response` shape. + * @see {@link EC2ClientResolvedConfig | config} for EC2Client's `config` shape. + * + */ +export class ListSnapshotsInRecycleBinCommand extends $Command< + ListSnapshotsInRecycleBinCommandInput, + ListSnapshotsInRecycleBinCommandOutput, + EC2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListSnapshotsInRecycleBinCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EC2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EC2Client"; + const commandName = "ListSnapshotsInRecycleBinCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListSnapshotsInRecycleBinRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListSnapshotsInRecycleBinResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListSnapshotsInRecycleBinCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_ec2ListSnapshotsInRecycleBinCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_ec2ListSnapshotsInRecycleBinCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ec2/src/commands/ModifyHostsCommand.ts b/clients/client-ec2/src/commands/ModifyHostsCommand.ts index 9ffbd31ae536..f148d8480222 100644 --- a/clients/client-ec2/src/commands/ModifyHostsCommand.ts +++ b/clients/client-ec2/src/commands/ModifyHostsCommand.ts @@ -12,7 +12,8 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { ModifyHostsRequest, ModifyHostsResult } from "../models/models_4"; +import { ModifyHostsRequest } from "../models/models_4"; +import { ModifyHostsResult } from "../models/models_5"; import { deserializeAws_ec2ModifyHostsCommand, serializeAws_ec2ModifyHostsCommand } from "../protocols/Aws_ec2"; export interface ModifyHostsCommandInput extends ModifyHostsRequest {} diff --git a/clients/client-ec2/src/commands/ModifyIdFormatCommand.ts b/clients/client-ec2/src/commands/ModifyIdFormatCommand.ts index 
ea640c4eedd1..5cd6ea48d27a 100644 --- a/clients/client-ec2/src/commands/ModifyIdFormatCommand.ts +++ b/clients/client-ec2/src/commands/ModifyIdFormatCommand.ts @@ -12,7 +12,7 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { ModifyIdFormatRequest } from "../models/models_4"; +import { ModifyIdFormatRequest } from "../models/models_5"; import { deserializeAws_ec2ModifyIdFormatCommand, serializeAws_ec2ModifyIdFormatCommand } from "../protocols/Aws_ec2"; export interface ModifyIdFormatCommandInput extends ModifyIdFormatRequest {} diff --git a/clients/client-ec2/src/commands/ModifyIdentityIdFormatCommand.ts b/clients/client-ec2/src/commands/ModifyIdentityIdFormatCommand.ts index c94eabc8da11..ac87707cebe0 100644 --- a/clients/client-ec2/src/commands/ModifyIdentityIdFormatCommand.ts +++ b/clients/client-ec2/src/commands/ModifyIdentityIdFormatCommand.ts @@ -12,7 +12,7 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { ModifyIdentityIdFormatRequest } from "../models/models_4"; +import { ModifyIdentityIdFormatRequest } from "../models/models_5"; import { deserializeAws_ec2ModifyIdentityIdFormatCommand, serializeAws_ec2ModifyIdentityIdFormatCommand, diff --git a/clients/client-ec2/src/commands/ModifyImageAttributeCommand.ts b/clients/client-ec2/src/commands/ModifyImageAttributeCommand.ts index d636cda871c0..4a8a7a477309 100644 --- a/clients/client-ec2/src/commands/ModifyImageAttributeCommand.ts +++ b/clients/client-ec2/src/commands/ModifyImageAttributeCommand.ts @@ -12,7 +12,7 @@ import { } from "@aws-sdk/types"; import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; -import { ModifyImageAttributeRequest } from "../models/models_4"; +import { ModifyImageAttributeRequest } from "../models/models_5"; import { deserializeAws_ec2ModifyImageAttributeCommand, serializeAws_ec2ModifyImageAttributeCommand, diff --git a/clients/client-ec2/src/commands/ModifySnapshotTierCommand.ts b/clients/client-ec2/src/commands/ModifySnapshotTierCommand.ts new file mode 100644 index 000000000000..55d2eb971b72 --- /dev/null +++ b/clients/client-ec2/src/commands/ModifySnapshotTierCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; +import { ModifySnapshotTierRequest, ModifySnapshotTierResult } from "../models/models_5"; +import { + deserializeAws_ec2ModifySnapshotTierCommand, + serializeAws_ec2ModifySnapshotTierCommand, +} from "../protocols/Aws_ec2"; + +export interface ModifySnapshotTierCommandInput extends ModifySnapshotTierRequest {} +export interface ModifySnapshotTierCommandOutput extends ModifySnapshotTierResult, __MetadataBearer {} + +/** + *

                                  Archives an Amazon EBS snapshot. When you archive a snapshot, it is converted to a full + * snapshot that includes all of the blocks of data that were written to the volume at the + * time the snapshot was created, and moved from the standard tier to the archive + * tier. For more information, see Archive Amazon EBS snapshots + * in the Amazon Elastic Compute Cloud User Guide.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EC2Client, ModifySnapshotTierCommand } from "@aws-sdk/client-ec2"; // ES Modules import + * // const { EC2Client, ModifySnapshotTierCommand } = require("@aws-sdk/client-ec2"); // CommonJS import + * const client = new EC2Client(config); + * const command = new ModifySnapshotTierCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ModifySnapshotTierCommandInput} for command's `input` shape. + * @see {@link ModifySnapshotTierCommandOutput} for command's `response` shape. + * @see {@link EC2ClientResolvedConfig | config} for EC2Client's `config` shape. + * + */ +export class ModifySnapshotTierCommand extends $Command< + ModifySnapshotTierCommandInput, + ModifySnapshotTierCommandOutput, + EC2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ModifySnapshotTierCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EC2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EC2Client"; + const commandName = "ModifySnapshotTierCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ModifySnapshotTierRequest.filterSensitiveLog, + outputFilterSensitiveLog: ModifySnapshotTierResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ModifySnapshotTierCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_ec2ModifySnapshotTierCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_ec2ModifySnapshotTierCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ec2/src/commands/ModifySubnetAttributeCommand.ts b/clients/client-ec2/src/commands/ModifySubnetAttributeCommand.ts index 5259ee5632f7..11530c9dc2f7 100644 --- a/clients/client-ec2/src/commands/ModifySubnetAttributeCommand.ts +++ b/clients/client-ec2/src/commands/ModifySubnetAttributeCommand.ts @@ -23,6 +23,36 @@ export interface ModifySubnetAttributeCommandOutput extends __MetadataBearer {} /** *

                                  Modifies a subnet attribute. You can only modify one attribute at a time.

                                  + * + *

                                  Use this action to modify subnets on Amazon Web Services Outposts.

                                  + *
                                    + *
                                  • + *

                                    To modify a subnet on an Outpost rack, set both + * MapCustomerOwnedIpOnLaunch and + * CustomerOwnedIpv4Pool. These two parameters act as a single + * attribute.

                                    + *
                                  • + *
                                  • + *

                                    To modify a subnet on an Outpost server, set either + * EnableLniAtDeviceIndex or + * DisableLniAtDeviceIndex.

                                    + *
                                  • + *
                                  + * + *

                                  For more information about Amazon Web Services Outposts, see the following:

                                  + * + * * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-ec2/src/commands/RestoreSnapshotFromRecycleBinCommand.ts b/clients/client-ec2/src/commands/RestoreSnapshotFromRecycleBinCommand.ts new file mode 100644 index 000000000000..b96f003ca4dc --- /dev/null +++ b/clients/client-ec2/src/commands/RestoreSnapshotFromRecycleBinCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; +import { RestoreSnapshotFromRecycleBinRequest, RestoreSnapshotFromRecycleBinResult } from "../models/models_5"; +import { + deserializeAws_ec2RestoreSnapshotFromRecycleBinCommand, + serializeAws_ec2RestoreSnapshotFromRecycleBinCommand, +} from "../protocols/Aws_ec2"; + +export interface RestoreSnapshotFromRecycleBinCommandInput extends RestoreSnapshotFromRecycleBinRequest {} +export interface RestoreSnapshotFromRecycleBinCommandOutput + extends RestoreSnapshotFromRecycleBinResult, + __MetadataBearer {} + +/** + *

                                  Restores a snapshot from the Recycle Bin. For more information, see Restore + * snapshots from the Recycle Bin in the Amazon Elastic Compute Cloud User Guide.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EC2Client, RestoreSnapshotFromRecycleBinCommand } from "@aws-sdk/client-ec2"; // ES Modules import + * // const { EC2Client, RestoreSnapshotFromRecycleBinCommand } = require("@aws-sdk/client-ec2"); // CommonJS import + * const client = new EC2Client(config); + * const command = new RestoreSnapshotFromRecycleBinCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link RestoreSnapshotFromRecycleBinCommandInput} for command's `input` shape. + * @see {@link RestoreSnapshotFromRecycleBinCommandOutput} for command's `response` shape. + * @see {@link EC2ClientResolvedConfig | config} for EC2Client's `config` shape. + * + */ +export class RestoreSnapshotFromRecycleBinCommand extends $Command< + RestoreSnapshotFromRecycleBinCommandInput, + RestoreSnapshotFromRecycleBinCommandOutput, + EC2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: RestoreSnapshotFromRecycleBinCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EC2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EC2Client"; + const commandName = "RestoreSnapshotFromRecycleBinCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: RestoreSnapshotFromRecycleBinRequest.filterSensitiveLog, + outputFilterSensitiveLog: RestoreSnapshotFromRecycleBinResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: RestoreSnapshotFromRecycleBinCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_ec2RestoreSnapshotFromRecycleBinCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_ec2RestoreSnapshotFromRecycleBinCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ec2/src/commands/RestoreSnapshotTierCommand.ts b/clients/client-ec2/src/commands/RestoreSnapshotTierCommand.ts new file mode 100644 index 000000000000..1246b131c2fb --- /dev/null +++ b/clients/client-ec2/src/commands/RestoreSnapshotTierCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EC2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EC2Client"; +import { 
RestoreSnapshotTierRequest, RestoreSnapshotTierResult } from "../models/models_5"; +import { + deserializeAws_ec2RestoreSnapshotTierCommand, + serializeAws_ec2RestoreSnapshotTierCommand, +} from "../protocols/Aws_ec2"; + +export interface RestoreSnapshotTierCommandInput extends RestoreSnapshotTierRequest {} +export interface RestoreSnapshotTierCommandOutput extends RestoreSnapshotTierResult, __MetadataBearer {} + +/** + *

                                  Restores an archived Amazon EBS snapshot for use temporarily or permanently, or modifies the restore + * period or restore type for a snapshot that was previously temporarily restored.

                                  + * + *

                                  For more information, see + * Restore an archived snapshot and + * modify the restore period or restore type for a temporarily restored snapshot in the Amazon Elastic Compute Cloud User Guide.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EC2Client, RestoreSnapshotTierCommand } from "@aws-sdk/client-ec2"; // ES Modules import + * // const { EC2Client, RestoreSnapshotTierCommand } = require("@aws-sdk/client-ec2"); // CommonJS import + * const client = new EC2Client(config); + * const command = new RestoreSnapshotTierCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link RestoreSnapshotTierCommandInput} for command's `input` shape. + * @see {@link RestoreSnapshotTierCommandOutput} for command's `response` shape. + * @see {@link EC2ClientResolvedConfig | config} for EC2Client's `config` shape. + * + */ +export class RestoreSnapshotTierCommand extends $Command< + RestoreSnapshotTierCommandInput, + RestoreSnapshotTierCommandOutput, + EC2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: RestoreSnapshotTierCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EC2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EC2Client"; + const commandName = "RestoreSnapshotTierCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: RestoreSnapshotTierRequest.filterSensitiveLog, + outputFilterSensitiveLog: RestoreSnapshotTierResult.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: RestoreSnapshotTierCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_ec2RestoreSnapshotTierCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_ec2RestoreSnapshotTierCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ec2/src/commands/index.ts b/clients/client-ec2/src/commands/index.ts index 581103497f3c..194ef27ced3c 100644 --- a/clients/client-ec2/src/commands/index.ts +++ b/clients/client-ec2/src/commands/index.ts @@ -256,6 +256,7 @@ export * from "./DescribeSecurityGroupReferencesCommand"; export * from "./DescribeSecurityGroupRulesCommand"; export * from "./DescribeSecurityGroupsCommand"; export * from "./DescribeSnapshotAttributeCommand"; +export * from "./DescribeSnapshotTierStatusCommand"; export * from "./DescribeSnapshotsCommand"; export * from "./DescribeSpotDatafeedSubscriptionCommand"; export * from "./DescribeSpotFleetInstancesCommand"; @@ -367,6 +368,7 @@ export * from "./ImportInstanceCommand"; export * from "./ImportKeyPairCommand"; export * from "./ImportSnapshotCommand"; export * from "./ImportVolumeCommand"; +export * from "./ListSnapshotsInRecycleBinCommand"; export * from "./ModifyAddressAttributeCommand"; export * from "./ModifyAvailabilityZoneGroupCommand"; export * from 
"./ModifyCapacityReservationCommand"; @@ -394,6 +396,7 @@ export * from "./ModifyPrivateDnsNameOptionsCommand"; export * from "./ModifyReservedInstancesCommand"; export * from "./ModifySecurityGroupRulesCommand"; export * from "./ModifySnapshotAttributeCommand"; +export * from "./ModifySnapshotTierCommand"; export * from "./ModifySpotFleetRequestCommand"; export * from "./ModifySubnetAttributeCommand"; export * from "./ModifyTrafficMirrorFilterNetworkServicesCommand"; @@ -451,6 +454,8 @@ export * from "./ResetNetworkInterfaceAttributeCommand"; export * from "./ResetSnapshotAttributeCommand"; export * from "./RestoreAddressToClassicCommand"; export * from "./RestoreManagedPrefixListVersionCommand"; +export * from "./RestoreSnapshotFromRecycleBinCommand"; +export * from "./RestoreSnapshotTierCommand"; export * from "./RevokeClientVpnIngressCommand"; export * from "./RevokeSecurityGroupEgressCommand"; export * from "./RevokeSecurityGroupIngressCommand"; diff --git a/clients/client-ec2/src/models/models_0.ts b/clients/client-ec2/src/models/models_0.ts index 328f98626f54..1a8605054554 100644 --- a/clients/client-ec2/src/models/models_0.ts +++ b/clients/client-ec2/src/models/models_0.ts @@ -5834,6 +5834,12 @@ export type _InstanceType = | "g5.4xlarge" | "g5.8xlarge" | "g5.xlarge" + | "g5g.16xlarge" + | "g5g.2xlarge" + | "g5g.4xlarge" + | "g5g.8xlarge" + | "g5g.metal" + | "g5g.xlarge" | "h1.16xlarge" | "h1.2xlarge" | "h1.4xlarge" @@ -5859,10 +5865,22 @@ export type _InstanceType = | "i3en.large" | "i3en.metal" | "i3en.xlarge" + | "im4gn.16xlarge" + | "im4gn.2xlarge" + | "im4gn.4xlarge" + | "im4gn.8xlarge" + | "im4gn.large" + | "im4gn.xlarge" | "inf1.24xlarge" | "inf1.2xlarge" | "inf1.6xlarge" | "inf1.xlarge" + | "is4gen.2xlarge" + | "is4gen.4xlarge" + | "is4gen.8xlarge" + | "is4gen.large" + | "is4gen.medium" + | "is4gen.xlarge" | "m1.large" | "m1.medium" | "m1.small" @@ -5939,6 +5957,16 @@ export type _InstanceType = | "m5zn.large" | "m5zn.metal" | "m5zn.xlarge" + | "m6a.12xlarge" + | "m6a.16xlarge" + | "m6a.24xlarge" + | "m6a.2xlarge" + | "m6a.32xlarge" + | "m6a.48xlarge" + | "m6a.4xlarge" + | "m6a.8xlarge" + | "m6a.large" + | "m6a.xlarge" | "m6g.12xlarge" | "m6g.16xlarge" | "m6g.2xlarge" @@ -7209,6 +7237,15 @@ export interface Subnet { */ DefaultForAz?: boolean; + /** + *

                                  + * Indicates the device position for local network interfaces in this subnet. For example, + * 1 indicates local network interfaces in this subnet are the secondary + * network interface (eth1). + *

                                  + */ + EnableLniAtDeviceIndex?: number; + /** *

                                  Indicates whether instances launched in this subnet receive a public IPv4 address.

                                  */ diff --git a/clients/client-ec2/src/models/models_1.ts b/clients/client-ec2/src/models/models_1.ts index 915da4d5b11e..8bf282e347e9 100644 --- a/clients/client-ec2/src/models/models_1.ts +++ b/clients/client-ec2/src/models/models_1.ts @@ -6642,7 +6642,12 @@ export namespace CreateSnapshotRequest { }); } -export type SnapshotState = "completed" | "error" | "pending"; +export type SnapshotState = "completed" | "error" | "pending" | "recoverable" | "recovering"; + +export enum StorageTier { + archive = "archive", + standard = "standard", +} /** *

                                  Describes a snapshot.

                                  @@ -6734,6 +6739,20 @@ export interface Snapshot { *

                                  Any tags assigned to the snapshot.

                                  */ Tags?: Tag[]; + + /** + *

                                  The storage tier in which the snapshot is stored. standard indicates + * that the snapshot is stored in the standard snapshot storage tier and that it is ready + * for use. archive indicates that the snapshot is currently archived and that + * it must be restored before it can be used.

                                  + */ + StorageTier?: StorageTier | string; + + /** + *

                                  Only for archived snapshots that are temporarily restored. Indicates the date and + * time when a temporarily restored snapshot will be automatically re-archived.

                                  + */ + RestoreExpiryTime?: Date; } export namespace Snapshot { @@ -9889,50 +9908,3 @@ export namespace CreateVpcEndpointResult { ...obj, }); } - -export interface CreateVpcEndpointConnectionNotificationRequest { - /** - *
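
A minimal sketch of how the new Snapshot members surface to callers: it lists the caller's own snapshots with DescribeSnapshotsCommand and logs the StorageTier and RestoreExpiryTime fields added above. The region, the OwnerIds value, and the use of the storage-tier filter (documented later in this change) are illustrative placeholders, not part of the generated code.

```typescript
import { EC2Client, DescribeSnapshotsCommand } from "@aws-sdk/client-ec2";

// Assumed region; any configured region behaves the same way.
const client = new EC2Client({ region: "us-east-1" });

// List the caller's own archived snapshots and report the new storage-tier fields.
const { Snapshots } = await client.send(
  new DescribeSnapshotsCommand({
    OwnerIds: ["self"],
    Filters: [{ Name: "storage-tier", Values: ["archive"] }],
  })
);

for (const snapshot of Snapshots ?? []) {
  // StorageTier is "standard" or "archive"; RestoreExpiryTime is set only for
  // archived snapshots that are temporarily restored.
  console.log(snapshot.SnapshotId, snapshot.StorageTier, snapshot.RestoreExpiryTime);
}
```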

                                  Checks whether you have the required permissions for the action, without actually making the request, - * and provides an error response. If you have the required permissions, the error response is DryRunOperation. - * Otherwise, it is UnauthorizedOperation.

                                  - */ - DryRun?: boolean; - - /** - *

                                  The ID of the endpoint service.

                                  - */ - ServiceId?: string; - - /** - *

                                  The ID of the endpoint.

                                  - */ - VpcEndpointId?: string; - - /** - *

                                  The ARN of the SNS topic for the notifications.

                                  - */ - ConnectionNotificationArn: string | undefined; - - /** - *

                                  One or more endpoint events for which to receive notifications. Valid values are - * Accept, Connect, Delete, and - * Reject.

                                  - */ - ConnectionEvents: string[] | undefined; - - /** - *

                                  Unique, case-sensitive identifier that you provide to ensure the idempotency of the - * request. For more information, see How to ensure - * idempotency.

                                  - */ - ClientToken?: string; -} - -export namespace CreateVpcEndpointConnectionNotificationRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: CreateVpcEndpointConnectionNotificationRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_2.ts b/clients/client-ec2/src/models/models_2.ts index ba588db13f93..ea2e84dd2b4f 100644 --- a/clients/client-ec2/src/models/models_2.ts +++ b/clients/client-ec2/src/models/models_2.ts @@ -69,6 +69,53 @@ import { TransitGatewayRouteTable, } from "./models_1"; +export interface CreateVpcEndpointConnectionNotificationRequest { + /** + *

                                  Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

                                  + */ + DryRun?: boolean; + + /** + *

                                  The ID of the endpoint service.

                                  + */ + ServiceId?: string; + + /** + *

                                  The ID of the endpoint.

                                  + */ + VpcEndpointId?: string; + + /** + *

                                  The ARN of the SNS topic for the notifications.

                                  + */ + ConnectionNotificationArn: string | undefined; + + /** + *

                                  One or more endpoint events for which to receive notifications. Valid values are + * Accept, Connect, Delete, and + * Reject.

                                  + */ + ConnectionEvents: string[] | undefined; + + /** + *

                                  Unique, case-sensitive identifier that you provide to ensure the idempotency of the + * request. For more information, see How to ensure + * idempotency.

                                  + */ + ClientToken?: string; +} + +export namespace CreateVpcEndpointConnectionNotificationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateVpcEndpointConnectionNotificationRequest): any => ({ + ...obj, + }); +} + export enum ConnectionNotificationState { Disabled = "Disabled", Enabled = "Enabled", @@ -9334,61 +9381,3 @@ export namespace DescribeHostReservationOfferingsResult { ...obj, }); } - -export interface DescribeHostReservationsRequest { - /** - *

                                  The filters.

                                  - *
                                    - *
                                  • - *

                                    - * instance-family - The instance family (for example, - * m4).

                                    - *
                                  • - *
                                  • - *

                                    - * payment-option - The payment option (NoUpfront | - * PartialUpfront | AllUpfront).

                                    - *
                                  • - *
                                  • - *

                                    - * state - The state of the reservation (payment-pending - * | payment-failed | active | - * retired).

                                    - *
                                  • - *
                                  • - *

                                    - * tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. - * For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

                                    - *
                                  • - *
                                  • - *

                                    - * tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

                                    - *
                                  • - *
                                  - */ - Filter?: Filter[]; - - /** - *

                                  The host reservation IDs.

                                  - */ - HostReservationIdSet?: string[]; - - /** - *

                                  The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

                                  - */ - MaxResults?: number; - - /** - *

                                  The token to use to retrieve the next page of results.

                                  - */ - NextToken?: string; -} - -export namespace DescribeHostReservationsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeHostReservationsRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_3.ts b/clients/client-ec2/src/models/models_3.ts index 899456dd6e36..2397a6c4fd5d 100644 --- a/clients/client-ec2/src/models/models_3.ts +++ b/clients/client-ec2/src/models/models_3.ts @@ -28,7 +28,6 @@ import { TagSpecification, TargetCapacityUnitType, Tenancy, - UserIdGroupPair, } from "./models_0"; import { BlockDeviceMapping, @@ -60,9 +59,11 @@ import { ReplaceRootVolumeTask, RouteTable, Snapshot, + SnapshotState, SpotDatafeedSubscription, SpotInstanceStateFault, SpotInstanceType, + StorageTier, } from "./models_1"; import { EventInformation, @@ -74,6 +75,64 @@ import { ProductCode, } from "./models_2"; +export interface DescribeHostReservationsRequest { + /** + *

                                  The filters.

                                  + *
                                    + *
                                  • + *

                                    + * instance-family - The instance family (for example, + * m4).

                                    + *
                                  • + *
                                  • + *

                                    + * payment-option - The payment option (NoUpfront | + * PartialUpfront | AllUpfront).

                                    + *
                                  • + *
                                  • + *

                                    + * state - The state of the reservation (payment-pending + * | payment-failed | active | + * retired).

                                    + *
                                  • + *
                                  • + *

                                    + * tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. + * For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

                                    + *
                                  • + *
                                  • + *

                                    + * tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

                                    + *
                                  • + *
                                  + */ + Filter?: Filter[]; + + /** + *

                                  The host reservation IDs.

                                  + */ + HostReservationIdSet?: string[]; + + /** + *

                                  The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

                                  + */ + MaxResults?: number; + + /** + *

                                  The token to use to retrieve the next page of results.

                                  + */ + NextToken?: string; +} + +export namespace DescribeHostReservationsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeHostReservationsRequest): any => ({ + ...obj, + }); +} + export enum ReservationState { ACTIVE = "active", PAYMENT_FAILED = "payment-failed", @@ -10781,6 +10840,11 @@ export interface DescribeSnapshotsRequest { *
                                • *
                                • *

                                  + * storage-tier - The storage tier of the snapshot (archive | + * standard).

                                  + *
                                • + *
                                • + *

                                  * tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. * For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

                                  *
                                • @@ -10880,6 +10944,166 @@ export namespace DescribeSnapshotsResult { }); } +export interface DescribeSnapshotTierStatusRequest { + /** + *

                                  The filters.

                                  + *
                                    + *
                                  • + *

                                    + * snapshot-id - The snapshot ID.

                                    + *
                                  • + *
                                  • + *

                                    + * volume-id - The ID of the volume the snapshot is for.

                                    + *
                                  • + *
                                  • + *

                                    + * last-tiering-operation - The state of the last archive or restore action. (archiving | archival_error | + * archival_complete | restoring | restore_error | restore_complete)

                                    + *
                                  • + *
                                  + */ + Filters?: Filter[]; + + /** + *

                                  Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

                                  + */ + DryRun?: boolean; + + /** + *

                                  The token for the next page of results.

                                  + */ + NextToken?: string; + + /** + *

                                  The maximum number of results to return with a single call. + * To retrieve the remaining results, make another call with the returned nextToken value.

                                  + */ + MaxResults?: number; +} + +export namespace DescribeSnapshotTierStatusRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeSnapshotTierStatusRequest): any => ({ + ...obj, + }); +} + +export enum TieringOperationStatus { + archival_completed = "archival-completed", + archival_failed = "archival-failed", + archival_in_progress = "archival-in-progress", + permanent_restore_completed = "permanent-restore-completed", + permanent_restore_failed = "permanent-restore-failed", + permanent_restore_in_progress = "permanent-restore-in-progress", + temporary_restore_completed = "temporary-restore-completed", + temporary_restore_failed = "temporary-restore-failed", + temporary_restore_in_progress = "temporary-restore-in-progress", +} + +/** + *

                                  Provides information about a snapshot's storage tier.

                                  + */ +export interface SnapshotTierStatus { + /** + *

                                  The ID of the snapshot.

                                  + */ + SnapshotId?: string; + + /** + *

                                  The ID of the volume from which the snapshot was created.

                                  + */ + VolumeId?: string; + + /** + *

                                  The state of the snapshot.

                                  + */ + Status?: SnapshotState | string; + + /** + *

                                  The ID of the Amazon Web Services account that owns the snapshot.

                                  + */ + OwnerId?: string; + + /** + *

                                  The tags that are assigned to the snapshot.

                                  + */ + Tags?: Tag[]; + + /** + *

                                  The storage tier in which the snapshot is stored. standard indicates + * that the snapshot is stored in the standard snapshot storage tier and that it is ready + * for use. archive indicates that the snapshot is currently archived and that + * it must be restored before it can be used.

                                  + */ + StorageTier?: StorageTier | string; + + /** + *

                                  The date and time when the last archive or restore process was started.

                                  + */ + LastTieringStartTime?: Date; + + /** + *

                                  The progress of the last archive or restore process, as a percentage.

                                  + */ + LastTieringProgress?: number; + + /** + *

                                  The status of the last archive or restore process.

                                  + */ + LastTieringOperationStatus?: TieringOperationStatus | string; + + /** + *

                                  A message describing the status of the last archive or restore process.

                                  + */ + LastTieringOperationStatusDetail?: string; + + /** + *

                                  The date and time when the last archive process was completed.

                                  + */ + ArchivalCompleteTime?: Date; + + /** + *

                                  Only for archived snapshots that are temporarily restored. Indicates the date and + * time when a temporarily restored snapshot will be automatically re-archived.

                                  + */ + RestoreExpiryTime?: Date; +} + +export namespace SnapshotTierStatus { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SnapshotTierStatus): any => ({ + ...obj, + }); +} + +export interface DescribeSnapshotTierStatusResult { + /** + *

                                  Information about the storage tiers of the snapshots.

                                  + */ + SnapshotTierStatuses?: SnapshotTierStatus[]; + + /** + *

                                  The token to use to retrieve the next page of results. This value is null when there are no more results to return.

                                  + */ + NextToken?: string; +} + +export namespace DescribeSnapshotTierStatusResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeSnapshotTierStatusResult): any => ({ + ...obj, + }); +} + /** *
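
A short sketch of calling the new DescribeSnapshotTierStatus operation with the snapshot-id filter described above; the region and snapshot ID are placeholders.

```typescript
import { EC2Client, DescribeSnapshotTierStatusCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" }); // assumed region

// Check the archive/restore progress of a specific snapshot (example ID).
const { SnapshotTierStatuses } = await client.send(
  new DescribeSnapshotTierStatusCommand({
    Filters: [{ Name: "snapshot-id", Values: ["snap-0123456789abcdef0"] }],
  })
);

for (const status of SnapshotTierStatuses ?? []) {
  console.log(
    status.SnapshotId,
    status.StorageTier,
    status.LastTieringOperationStatus, // e.g. "archival-in-progress"
    status.LastTieringProgress // percentage
  );
}
```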

                                  Contains the parameters for DescribeSpotDatafeedSubscription.

                                  */ @@ -12765,192 +12989,3 @@ export namespace DescribeSpotPriceHistoryRequest { ...obj, }); } - -/** - *

                                  Describes the maximum price per hour that you are willing to pay for a Spot - * Instance.

                                  - */ -export interface SpotPrice { - /** - *

                                  The Availability Zone.

                                  - */ - AvailabilityZone?: string; - - /** - *

                                  The instance type.

                                  - */ - InstanceType?: _InstanceType | string; - - /** - *

                                  A general description of the AMI.

                                  - */ - ProductDescription?: RIProductDescription | string; - - /** - *

                                  The maximum price per hour that you are willing to pay for a Spot Instance.

                                  - */ - SpotPrice?: string; - - /** - *

                                  The date and time the request was created, in UTC format (for example, - * YYYY-MM-DDTHH:MM:SSZ).

                                  - */ - Timestamp?: Date; -} - -export namespace SpotPrice { - /** - * @internal - */ - export const filterSensitiveLog = (obj: SpotPrice): any => ({ - ...obj, - }); -} - -/** - *

                                  Contains the output of DescribeSpotPriceHistory.

                                  - */ -export interface DescribeSpotPriceHistoryResult { - /** - *

                                  The token required to retrieve the next set of results. This value is null or an empty - * string when there are no more results to return.

                                  - */ - NextToken?: string; - - /** - *

                                  The historical Spot prices.

                                  - */ - SpotPriceHistory?: SpotPrice[]; -} - -export namespace DescribeSpotPriceHistoryResult { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeSpotPriceHistoryResult): any => ({ - ...obj, - }); -} - -export interface DescribeStaleSecurityGroupsRequest { - /** - *

                                  Checks whether you have the required permissions for the action, without actually making the request, - * and provides an error response. If you have the required permissions, the error response is DryRunOperation. - * Otherwise, it is UnauthorizedOperation.

                                  - */ - DryRun?: boolean; - - /** - *

                                  The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

                                  - */ - MaxResults?: number; - - /** - *

                                  The token for the next set of items to return. (You received this token from a prior call.)

                                  - */ - NextToken?: string; - - /** - *

                                  The ID of the VPC.

                                  - */ - VpcId: string | undefined; -} - -export namespace DescribeStaleSecurityGroupsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeStaleSecurityGroupsRequest): any => ({ - ...obj, - }); -} - -/** - *

                                  Describes a stale rule in a security group.

                                  - */ -export interface StaleIpPermission { - /** - *

                                  The start of the port range for the TCP and UDP protocols, or an ICMP type number. A value of - * -1 indicates all ICMP types.

                                  - */ - FromPort?: number; - - /** - *

                                  The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers).

                                  - */ - IpProtocol?: string; - - /** - *

                                  The IP ranges. Not applicable for stale security group rules.

                                  - */ - IpRanges?: string[]; - - /** - *

                                  The prefix list IDs. Not applicable for stale security group rules.

                                  - */ - PrefixListIds?: string[]; - - /** - *

                                  The end of the port range for the TCP and UDP protocols, or an ICMP type number. A value of - * -1 indicates all ICMP types.

                                  - */ - ToPort?: number; - - /** - *

                                  The security group pairs. Returns the ID of the referenced security group and VPC, and the ID and status of the VPC peering connection.

                                  - */ - UserIdGroupPairs?: UserIdGroupPair[]; -} - -export namespace StaleIpPermission { - /** - * @internal - */ - export const filterSensitiveLog = (obj: StaleIpPermission): any => ({ - ...obj, - }); -} - -/** - *

                                  Describes a stale security group (a security group that contains stale rules).

                                  - */ -export interface StaleSecurityGroup { - /** - *

                                  The description of the security group.

                                  - */ - Description?: string; - - /** - *

                                  The ID of the security group.

                                  - */ - GroupId?: string; - - /** - *

                                  The name of the security group.

                                  - */ - GroupName?: string; - - /** - *

                                  Information about the stale inbound rules in the security group.

                                  - */ - StaleIpPermissions?: StaleIpPermission[]; - - /** - *

                                  Information about the stale outbound rules in the security group.

                                  - */ - StaleIpPermissionsEgress?: StaleIpPermission[]; - - /** - *

                                  The ID of the VPC for the security group.

                                  - */ - VpcId?: string; -} - -export namespace StaleSecurityGroup { - /** - * @internal - */ - export const filterSensitiveLog = (obj: StaleSecurityGroup): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_4.ts b/clients/client-ec2/src/models/models_4.ts index 27964227ab89..3c413dbe94e5 100644 --- a/clients/client-ec2/src/models/models_4.ts +++ b/clients/client-ec2/src/models/models_4.ts @@ -5,7 +5,6 @@ import { AddressAttribute, AllowedPrincipal, AssociationStatus, - AttributeValue, AutoPlacement, CapacityReservationState, ClientConnectOptions, @@ -37,7 +36,7 @@ import { TransitGatewayPeeringAttachment, TransitGatewayVpcAttachment, TrunkInterfaceAssociation, - UnsuccessfulItem, + UserIdGroupPair, Vpc, VpcCidrBlockAssociation, VpcIpv6CidrBlockAssociation, @@ -87,13 +86,201 @@ import { AttributeBooleanValue, BootModeValues, ImportImageLicenseConfigurationResponse, - LaunchPermission, + RIProductDescription, SnapshotDetail, SnapshotTaskDetail, - StaleSecurityGroup, VirtualizationType, } from "./models_3"; +/** + *

                                  Describes the maximum price per hour that you are willing to pay for a Spot + * Instance.

                                  + */ +export interface SpotPrice { + /** + *

                                  The Availability Zone.

                                  + */ + AvailabilityZone?: string; + + /** + *

                                  The instance type.

                                  + */ + InstanceType?: _InstanceType | string; + + /** + *

                                  A general description of the AMI.

                                  + */ + ProductDescription?: RIProductDescription | string; + + /** + *

                                  The maximum price per hour that you are willing to pay for a Spot Instance.

                                  + */ + SpotPrice?: string; + + /** + *

                                  The date and time the request was created, in UTC format (for example, + * YYYY-MM-DDTHH:MM:SSZ).

                                  + */ + Timestamp?: Date; +} + +export namespace SpotPrice { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SpotPrice): any => ({ + ...obj, + }); +} + +/** + *

                                  Contains the output of DescribeSpotPriceHistory.

                                  + */ +export interface DescribeSpotPriceHistoryResult { + /** + *

                                  The token required to retrieve the next set of results. This value is null or an empty + * string when there are no more results to return.

                                  + */ + NextToken?: string; + + /** + *

                                  The historical Spot prices.

                                  + */ + SpotPriceHistory?: SpotPrice[]; +} + +export namespace DescribeSpotPriceHistoryResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeSpotPriceHistoryResult): any => ({ + ...obj, + }); +} + +export interface DescribeStaleSecurityGroupsRequest { + /** + *

                                  Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

                                  + */ + DryRun?: boolean; + + /** + *

                                  The maximum number of items to return for this request. The request returns a token that you can specify in a subsequent call to get the next set of results.

                                  + */ + MaxResults?: number; + + /** + *

                                  The token for the next set of items to return. (You received this token from a prior call.)

                                  + */ + NextToken?: string; + + /** + *

                                  The ID of the VPC.

                                  + */ + VpcId: string | undefined; +} + +export namespace DescribeStaleSecurityGroupsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeStaleSecurityGroupsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Describes a stale rule in a security group.

                                  + */ +export interface StaleIpPermission { + /** + *

                                  The start of the port range for the TCP and UDP protocols, or an ICMP type number. A value of + * -1 indicates all ICMP types.

                                  + */ + FromPort?: number; + + /** + *

                                  The IP protocol name (for tcp, udp, and icmp) or number (see Protocol Numbers).

                                  + */ + IpProtocol?: string; + + /** + *

                                  The IP ranges. Not applicable for stale security group rules.

                                  + */ + IpRanges?: string[]; + + /** + *

                                  The prefix list IDs. Not applicable for stale security group rules.

                                  + */ + PrefixListIds?: string[]; + + /** + *

                                  The end of the port range for the TCP and UDP protocols, or an ICMP type number. A value of + * -1 indicates all ICMP types.

                                  + */ + ToPort?: number; + + /** + *

                                  The security group pairs. Returns the ID of the referenced security group and VPC, and the ID and status of the VPC peering connection.

                                  + */ + UserIdGroupPairs?: UserIdGroupPair[]; +} + +export namespace StaleIpPermission { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StaleIpPermission): any => ({ + ...obj, + }); +} + +/** + *

                                  Describes a stale security group (a security group that contains stale rules).

                                  + */ +export interface StaleSecurityGroup { + /** + *

                                  The description of the security group.

                                  + */ + Description?: string; + + /** + *

                                  The ID of the security group.

                                  + */ + GroupId?: string; + + /** + *

                                  The name of the security group.

                                  + */ + GroupName?: string; + + /** + *

                                  Information about the stale inbound rules in the security group.

                                  + */ + StaleIpPermissions?: StaleIpPermission[]; + + /** + *

                                  Information about the stale outbound rules in the security group.

                                  + */ + StaleIpPermissionsEgress?: StaleIpPermission[]; + + /** + *

                                  The ID of the VPC for the security group.

                                  + */ + VpcId?: string; +} + +export namespace StaleSecurityGroup { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StaleSecurityGroup): any => ({ + ...obj, + }); +} + export interface DescribeStaleSecurityGroupsResult { /** *

                                  The token to use when requesting the next set of items. If there are no additional items to return, the string is empty.

                                  @@ -9062,6 +9249,101 @@ export namespace ImportVolumeResult { }); } +export interface ListSnapshotsInRecycleBinRequest { + /** + *

                                  The maximum number of results to return with a single call. + * To retrieve the remaining results, make another call with the returned nextToken value.

                                  + */ + MaxResults?: number; + + /** + *

                                  The token for the next page of results.

                                  + */ + NextToken?: string; + + /** + *

                                  The IDs of the snapshots to list. Omit this parameter to list all of the + * snapshots that are in the Recycle Bin.

                                  + */ + SnapshotIds?: string[]; + + /** + *

                                  Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

                                  + */ + DryRun?: boolean; +} + +export namespace ListSnapshotsInRecycleBinRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListSnapshotsInRecycleBinRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Information about a snapshot that is currently in the Recycle Bin.

                                  + */ +export interface SnapshotRecycleBinInfo { + /** + *

                                  The ID of the snapshot.

                                  + */ + SnapshotId?: string; + + /** + *

                                  The date and time when the snapshot entered the Recycle Bin.

                                  + */ + RecycleBinEnterTime?: Date; + + /** + *

                                  The date and time when the snapshot is to be permanently deleted from the Recycle Bin.

                                  + */ + RecycleBinExitTime?: Date; + + /** + *

                                  The description for the snapshot.

                                  + */ + Description?: string; + + /** + *

                                  The ID of the volume from which the snapshot was created.

                                  + */ + VolumeId?: string; +} + +export namespace SnapshotRecycleBinInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SnapshotRecycleBinInfo): any => ({ + ...obj, + }); +} + +export interface ListSnapshotsInRecycleBinResult { + /** + *

                                  Information about the snapshots.

                                  + */ + Snapshots?: SnapshotRecycleBinInfo[]; + + /** + *

                                  The token to use to retrieve the next page of results. This value is null when there are no more results to return.

                                  + */ + NextToken?: string; +} + +export namespace ListSnapshotsInRecycleBinResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListSnapshotsInRecycleBinResult): any => ({ + ...obj, + }); +} + export interface ModifyAddressAttributeRequest { /** *
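
A sketch of paging through the Recycle Bin with the new ListSnapshotsInRecycleBin shapes, following the NextToken contract described above; the region and page size are placeholders.

```typescript
import { EC2Client, ListSnapshotsInRecycleBinCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" }); // assumed region

// Page through every snapshot currently in the Recycle Bin.
let nextToken: string | undefined;
do {
  const page = await client.send(
    new ListSnapshotsInRecycleBinCommand({ MaxResults: 50, NextToken: nextToken })
  );
  for (const snapshot of page.Snapshots ?? []) {
    console.log(snapshot.SnapshotId, "leaves the Recycle Bin at", snapshot.RecycleBinExitTime);
  }
  nextToken = page.NextToken;
} while (nextToken);
```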

                                  [EC2-VPC] The allocation ID.

                                  @@ -9789,275 +10071,3 @@ export namespace ModifyHostsRequest { ...obj, }); } - -export interface ModifyHostsResult { - /** - *

                                  The IDs of the Dedicated Hosts that were successfully modified.

                                  - */ - Successful?: string[]; - - /** - *

                                  The IDs of the Dedicated Hosts that could not be modified. Check whether the - * setting you requested can be used.

                                  - */ - Unsuccessful?: UnsuccessfulItem[]; -} - -export namespace ModifyHostsResult { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ModifyHostsResult): any => ({ - ...obj, - }); -} - -export interface ModifyIdentityIdFormatRequest { - /** - *

                                  The ARN of the principal, which can be an IAM user, IAM role, or the root user. Specify - * all to modify the ID format for all IAM users, IAM roles, and the root user of - * the account.

                                  - */ - PrincipalArn: string | undefined; - - /** - *

                                  The type of resource: bundle | conversion-task | customer-gateway | dhcp-options | - * elastic-ip-allocation | elastic-ip-association | - * export-task | flow-log | image | - * import-task | internet-gateway | network-acl - * | network-acl-association | network-interface | - * network-interface-attachment | prefix-list | - * route-table | route-table-association | - * security-group | subnet | - * subnet-cidr-block-association | vpc | - * vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

                                  - *

                                  Alternatively, use the all-current option to include all resource types that are - * currently within their opt-in period for longer IDs.

                                  - */ - Resource: string | undefined; - - /** - *

                                  Indicates whether the resource should use longer IDs (17-character IDs)

                                  - */ - UseLongIds: boolean | undefined; -} - -export namespace ModifyIdentityIdFormatRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ModifyIdentityIdFormatRequest): any => ({ - ...obj, - }); -} - -export interface ModifyIdFormatRequest { - /** - *

                                  The type of resource: bundle | conversion-task | customer-gateway | dhcp-options | - * elastic-ip-allocation | elastic-ip-association | - * export-task | flow-log | image | - * import-task | internet-gateway | network-acl - * | network-acl-association | network-interface | - * network-interface-attachment | prefix-list | - * route-table | route-table-association | - * security-group | subnet | - * subnet-cidr-block-association | vpc | - * vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

                                  - *

                                  Alternatively, use the all-current option to include all resource types that are - * currently within their opt-in period for longer IDs.

                                  - */ - Resource: string | undefined; - - /** - *

                                  Indicate whether the resource should use longer IDs (17-character IDs).

                                  - */ - UseLongIds: boolean | undefined; -} - -export namespace ModifyIdFormatRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ModifyIdFormatRequest): any => ({ - ...obj, - }); -} - -/** - *

                                  Describes a launch permission modification.

                                  - */ -export interface LaunchPermissionModifications { - /** - *

                                  The Amazon Web Services account ID to add to the list of launch permissions for the AMI.

                                  - */ - Add?: LaunchPermission[]; - - /** - *

                                  The Amazon Web Services account ID to remove from the list of launch permissions for the AMI.

                                  - */ - Remove?: LaunchPermission[]; -} - -export namespace LaunchPermissionModifications { - /** - * @internal - */ - export const filterSensitiveLog = (obj: LaunchPermissionModifications): any => ({ - ...obj, - }); -} - -/** - *

                                  Contains the parameters for ModifyImageAttribute.

                                  - */ -export interface ModifyImageAttributeRequest { - /** - *

                                  The name of the attribute to modify.

                                  - *

                                  Valid values: description | launchPermission - *

                                  - */ - Attribute?: string; - - /** - *

                                  A new description for the AMI.

                                  - */ - Description?: AttributeValue; - - /** - *

                                  The ID of the AMI.

                                  - */ - ImageId: string | undefined; - - /** - *

                                  A new launch permission for the AMI.

                                  - */ - LaunchPermission?: LaunchPermissionModifications; - - /** - *

                                  The operation type. - * This parameter can be used only when the Attribute parameter is launchPermission.

                                  - */ - OperationType?: OperationType | string; - - /** - *

                                  Not supported.

                                  - */ - ProductCodes?: string[]; - - /** - *

                                  The user groups. - * This parameter can be used only when the Attribute parameter is launchPermission.

                                  - */ - UserGroups?: string[]; - - /** - *

                                  The Amazon Web Services account IDs. - * This parameter can be used only when the Attribute parameter is launchPermission.

                                  - */ - UserIds?: string[]; - - /** - *

                                  The value of the attribute being modified. - * This parameter can be used only when the Attribute parameter is description.

                                  - */ - Value?: string; - - /** - *

                                  Checks whether you have the required permissions for the action, without actually making the request, - * and provides an error response. If you have the required permissions, the error response is DryRunOperation. - * Otherwise, it is UnauthorizedOperation.

                                  - */ - DryRun?: boolean; - - /** - *

                                  The Amazon Resource Name (ARN) of an organization. This parameter can be used only when the Attribute parameter is launchPermission.

                                  - */ - OrganizationArns?: string[]; - - /** - *

                                  The Amazon Resource Name (ARN) of an organizational unit (OU). This parameter can be used only when the Attribute parameter is launchPermission.

                                  - */ - OrganizationalUnitArns?: string[]; -} - -export namespace ModifyImageAttributeRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ModifyImageAttributeRequest): any => ({ - ...obj, - }); -} - -/** - *

                                  Describes information used to set up an EBS volume specified in a block device - * mapping.

                                  - */ -export interface EbsInstanceBlockDeviceSpecification { - /** - *

                                  Indicates whether the volume is deleted on instance termination.

                                  - */ - DeleteOnTermination?: boolean; - - /** - *

                                  The ID of the EBS volume.

                                  - */ - VolumeId?: string; -} - -export namespace EbsInstanceBlockDeviceSpecification { - /** - * @internal - */ - export const filterSensitiveLog = (obj: EbsInstanceBlockDeviceSpecification): any => ({ - ...obj, - }); -} - -/** - *

                                  Describes a block device mapping entry.

                                  - */ -export interface InstanceBlockDeviceMappingSpecification { - /** - *

                                  The device name (for example, /dev/sdh or xvdh).

                                  - */ - DeviceName?: string; - - /** - *

                                  Parameters used to automatically set up EBS volumes when the instance is - * launched.

                                  - */ - Ebs?: EbsInstanceBlockDeviceSpecification; - - /** - *

                                  suppress the specified device included in the block device mapping.

                                  - */ - NoDevice?: string; - - /** - *

                                  The virtual device name.

                                  - */ - VirtualName?: string; -} - -export namespace InstanceBlockDeviceMappingSpecification { - /** - * @internal - */ - export const filterSensitiveLog = (obj: InstanceBlockDeviceMappingSpecification): any => ({ - ...obj, - }); -} - -export interface BlobAttributeValue { - Value?: Uint8Array; -} - -export namespace BlobAttributeValue { - /** - * @internal - */ - export const filterSensitiveLog = (obj: BlobAttributeValue): any => ({ - ...obj, - }); -} diff --git a/clients/client-ec2/src/models/models_5.ts b/clients/client-ec2/src/models/models_5.ts index 03be795715d8..24b755dd7b45 100644 --- a/clients/client-ec2/src/models/models_5.ts +++ b/clients/client-ec2/src/models/models_5.ts @@ -45,6 +45,7 @@ import { PortRange, RuleAction, ShutdownBehavior, + SnapshotState, SpotInstanceType, TrafficDirection, TrafficMirrorFilter, @@ -86,6 +87,7 @@ import { InstanceNetworkInterfaceSpecification, InstanceState, InstanceStatusEvent, + LaunchPermission, LaunchTemplateConfig, Monitoring, NetworkInsightsAnalysis, @@ -97,13 +99,279 @@ import { SpotInstanceRequest, SpotPlacement, } from "./models_3"; -import { - BlobAttributeValue, - InstanceBlockDeviceMappingSpecification, - OperationType, - Purchase, - VolumeModification, -} from "./models_4"; +import { OperationType, Purchase, VolumeModification } from "./models_4"; + +export interface ModifyHostsResult { + /** + *

                                  The IDs of the Dedicated Hosts that were successfully modified.

                                  + */ + Successful?: string[]; + + /** + *

                                  The IDs of the Dedicated Hosts that could not be modified. Check whether the + * setting you requested can be used.

                                  + */ + Unsuccessful?: UnsuccessfulItem[]; +} + +export namespace ModifyHostsResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifyHostsResult): any => ({ + ...obj, + }); +} + +export interface ModifyIdentityIdFormatRequest { + /** + *

                                  The ARN of the principal, which can be an IAM user, IAM role, or the root user. Specify + * all to modify the ID format for all IAM users, IAM roles, and the root user of + * the account.

                                  + */ + PrincipalArn: string | undefined; + + /** + *

                                  The type of resource: bundle | conversion-task | customer-gateway | dhcp-options | + * elastic-ip-allocation | elastic-ip-association | + * export-task | flow-log | image | + * import-task | internet-gateway | network-acl + * | network-acl-association | network-interface | + * network-interface-attachment | prefix-list | + * route-table | route-table-association | + * security-group | subnet | + * subnet-cidr-block-association | vpc | + * vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

                                  + *

                                  Alternatively, use the all-current option to include all resource types that are + * currently within their opt-in period for longer IDs.

                                  + */ + Resource: string | undefined; + + /** + *

                                  Indicates whether the resource should use longer IDs (17-character IDs).

                                  + */ + UseLongIds: boolean | undefined; +} + +export namespace ModifyIdentityIdFormatRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifyIdentityIdFormatRequest): any => ({ + ...obj, + }); +} + +export interface ModifyIdFormatRequest { + /** + *

                                  The type of resource: bundle | conversion-task | customer-gateway | dhcp-options | + * elastic-ip-allocation | elastic-ip-association | + * export-task | flow-log | image | + * import-task | internet-gateway | network-acl + * | network-acl-association | network-interface | + * network-interface-attachment | prefix-list | + * route-table | route-table-association | + * security-group | subnet | + * subnet-cidr-block-association | vpc | + * vpc-cidr-block-association | vpc-endpoint | vpc-peering-connection | vpn-connection | vpn-gateway.

                                  + *

                                  Alternatively, use the all-current option to include all resource types that are + * currently within their opt-in period for longer IDs.

                                  + */ + Resource: string | undefined; + + /** + *

                                  Indicates whether the resource should use longer IDs (17-character IDs).

                                  + */ + UseLongIds: boolean | undefined; +} + +export namespace ModifyIdFormatRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifyIdFormatRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Describes a launch permission modification.

                                  + */ +export interface LaunchPermissionModifications { + /** + *

                                  The Amazon Web Services account ID to add to the list of launch permissions for the AMI.

                                  + */ + Add?: LaunchPermission[]; + + /** + *

                                  The Amazon Web Services account ID to remove from the list of launch permissions for the AMI.

                                  + */ + Remove?: LaunchPermission[]; +} + +export namespace LaunchPermissionModifications { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LaunchPermissionModifications): any => ({ + ...obj, + }); +} + +/** + *

                                  Contains the parameters for ModifyImageAttribute.

                                  + */ +export interface ModifyImageAttributeRequest { + /** + *

                                  The name of the attribute to modify.

                                  + *

                                  Valid values: description | launchPermission + *

                                  + */ + Attribute?: string; + + /** + *

                                  A new description for the AMI.

                                  + */ + Description?: AttributeValue; + + /** + *

                                  The ID of the AMI.

                                  + */ + ImageId: string | undefined; + + /** + *

                                  A new launch permission for the AMI.

                                  + */ + LaunchPermission?: LaunchPermissionModifications; + + /** + *

                                  The operation type. + * This parameter can be used only when the Attribute parameter is launchPermission.

                                  + */ + OperationType?: OperationType | string; + + /** + *

                                  Not supported.

                                  + */ + ProductCodes?: string[]; + + /** + *

                                  The user groups. + * This parameter can be used only when the Attribute parameter is launchPermission.

                                  + */ + UserGroups?: string[]; + + /** + *

                                  The Amazon Web Services account IDs. + * This parameter can be used only when the Attribute parameter is launchPermission.

                                  + */ + UserIds?: string[]; + + /** + *

                                  The value of the attribute being modified. + * This parameter can be used only when the Attribute parameter is description.

                                  + */ + Value?: string; + + /** + *

                                  Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

                                  + */ + DryRun?: boolean; + + /** + *

                                  The Amazon Resource Name (ARN) of an organization. This parameter can be used only when the Attribute parameter is launchPermission.

                                  + */ + OrganizationArns?: string[]; + + /** + *

                                  The Amazon Resource Name (ARN) of an organizational unit (OU). This parameter can be used only when the Attribute parameter is launchPermission.

                                  + */ + OrganizationalUnitArns?: string[]; +} + +export namespace ModifyImageAttributeRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifyImageAttributeRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Describes information used to set up an EBS volume specified in a block device + * mapping.

                                  + */ +export interface EbsInstanceBlockDeviceSpecification { + /** + *

                                  Indicates whether the volume is deleted on instance termination.

                                  + */ + DeleteOnTermination?: boolean; + + /** + *

                                  The ID of the EBS volume.

                                  + */ + VolumeId?: string; +} + +export namespace EbsInstanceBlockDeviceSpecification { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EbsInstanceBlockDeviceSpecification): any => ({ + ...obj, + }); +} + +/** + *

                                  Describes a block device mapping entry.

                                  + */ +export interface InstanceBlockDeviceMappingSpecification { + /** + *

                                  The device name (for example, /dev/sdh or xvdh).

                                  + */ + DeviceName?: string; + + /** + *

                                  Parameters used to automatically set up EBS volumes when the instance is + * launched.

                                  + */ + Ebs?: EbsInstanceBlockDeviceSpecification; + + /** + *

                                  Suppresses the specified device included in the block device mapping.

                                  + */ + NoDevice?: string; + + /** + *

                                  The virtual device name.

                                  + */ + VirtualName?: string; +} + +export namespace InstanceBlockDeviceMappingSpecification { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InstanceBlockDeviceMappingSpecification): any => ({ + ...obj, + }); +} + +export interface BlobAttributeValue { + Value?: Uint8Array; +} + +export namespace BlobAttributeValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BlobAttributeValue): any => ({ + ...obj, + }); +} export interface ModifyInstanceAttributeRequest { /** @@ -1306,6 +1574,59 @@ export namespace ModifySnapshotAttributeRequest { }); } +export enum TargetStorageTier { + archive = "archive", +} + +export interface ModifySnapshotTierRequest { + /** + *

                                  The ID of the snapshot.

                                  + */ + SnapshotId: string | undefined; + + /** + *

                                  The name of the storage tier. You must specify archive.

                                  + */ + StorageTier?: TargetStorageTier | string; + + /** + *

                                  Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

                                  + */ + DryRun?: boolean; +} + +export namespace ModifySnapshotTierRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifySnapshotTierRequest): any => ({ + ...obj, + }); +} + +export interface ModifySnapshotTierResult { + /** + *

                                  The ID of the snapshot.

                                  + */ + SnapshotId?: string; + + /** + *

                                  The date and time when the archive process was started.

                                  + */ + TieringStartTime?: Date; +} + +export namespace ModifySnapshotTierResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ModifySnapshotTierResult): any => ({ + ...obj, + }); +} + /** *
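
A sketch of archiving a snapshot with the new ModifySnapshotTier shapes; per the request documentation above, archive is the only storage tier that can be specified. The region and snapshot ID are placeholders.

```typescript
import { EC2Client, ModifySnapshotTierCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" }); // assumed region

// Move a snapshot (example ID) to the archive tier.
const { SnapshotId, TieringStartTime } = await client.send(
  new ModifySnapshotTierCommand({
    SnapshotId: "snap-0123456789abcdef0",
    StorageTier: "archive",
  })
);

console.log(`Archival of ${SnapshotId} started at ${TieringStartTime}`);
```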

                                  Contains the parameters for ModifySpotFleetRequest.

                                  */ @@ -1432,6 +1753,24 @@ export interface ModifySubnetAttributeRequest { *

                                  Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.

                                  */ EnableResourceNameDnsAAAARecordOnLaunch?: AttributeBooleanValue; + + /** + *

+ * Indicates the device position for local network interfaces in this subnet. For example, + * 1 indicates that local network interfaces in this subnet are the secondary + * network interface (eth1). A local network interface cannot be the primary network + * interface (eth0). + *

                                  + */ + EnableLniAtDeviceIndex?: number; + + /** + *

                                  + * Specify true to indicate that local network interfaces at the current + * position should be disabled. + *

                                  + */ + DisableLniAtDeviceIndex?: AttributeBooleanValue; } export namespace ModifySubnetAttributeRequest { @@ -5148,6 +5487,164 @@ export namespace RestoreManagedPrefixListVersionResult { }); } +export interface RestoreSnapshotFromRecycleBinRequest { + /** + *
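// Editor's note: a hedged sketch of the two new local network interface (LNI) subnet
// attributes shown above. Not part of the generated client; the subnet ID is a placeholder.
import { EC2Client, ModifySubnetAttributeCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Make local network interfaces in this subnet attach as the secondary interface (eth1).
async function enableLni(subnetId: string): Promise<void> {
  await client.send(
    new ModifySubnetAttributeCommand({ SubnetId: subnetId, EnableLniAtDeviceIndex: 1 })
  );
}

// DisableLniAtDeviceIndex is an AttributeBooleanValue, so it is wrapped as { Value: true }.
async function disableLni(subnetId: string): Promise<void> {
  await client.send(
    new ModifySubnetAttributeCommand({ SubnetId: subnetId, DisableLniAtDeviceIndex: { Value: true } })
  );
}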

                                  The ID of the snapshot to restore.

                                  + */ + SnapshotId: string | undefined; + + /** + *

                                  Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

                                  + */ + DryRun?: boolean; +} + +export namespace RestoreSnapshotFromRecycleBinRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreSnapshotFromRecycleBinRequest): any => ({ + ...obj, + }); +} + +export interface RestoreSnapshotFromRecycleBinResult { + /** + *

                                  The ID of the snapshot.

                                  + */ + SnapshotId?: string; + + /** + *

                                  The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the + * Amazon Elastic Compute Cloud User Guide.

                                  + */ + OutpostArn?: string; + + /** + *

                                  The description for the snapshot.

                                  + */ + Description?: string; + + /** + *

                                  Indicates whether the snapshot is encrypted.

                                  + */ + Encrypted?: boolean; + + /** + *

                                  The ID of the Amazon Web Services account that owns the EBS snapshot.

                                  + */ + OwnerId?: string; + + /** + *

                                  The progress of the snapshot, as a percentage.

                                  + */ + Progress?: string; + + /** + *

                                  The time stamp when the snapshot was initiated.

                                  + */ + StartTime?: Date; + + /** + *

                                  The state of the snapshot.

                                  + */ + State?: SnapshotState | string; + + /** + *

                                  The ID of the volume that was used to create the snapshot.

                                  + */ + VolumeId?: string; + + /** + *

                                  The size of the volume, in GiB.

                                  + */ + VolumeSize?: number; +} + +export namespace RestoreSnapshotFromRecycleBinResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreSnapshotFromRecycleBinResult): any => ({ + ...obj, + }); +} + +export interface RestoreSnapshotTierRequest { + /** + *
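// Editor's note: a sketch tying together the Recycle Bin additions in this change: listing
// snapshots via the new ListSnapshotsInRecycleBin paginator and restoring one of them. Not
// part of the generated client; the "restore everything" policy is only for illustration,
// and the paginator is assumed to be re-exported from the package root.
import {
  EC2Client,
  RestoreSnapshotFromRecycleBinCommand,
  paginateListSnapshotsInRecycleBin,
} from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

async function restoreAllFromRecycleBin(): Promise<void> {
  // Page through every snapshot currently held in the Recycle Bin.
  for await (const page of paginateListSnapshotsInRecycleBin({ client }, {})) {
    for (const snapshot of page.Snapshots ?? []) {
      if (!snapshot.SnapshotId) continue;
      const restored = await client.send(
        new RestoreSnapshotFromRecycleBinCommand({ SnapshotId: snapshot.SnapshotId })
      );
      console.log(`Restored ${restored.SnapshotId} (state: ${restored.State})`);
    }
  }
}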

                                  The ID of the snapshot to restore.

                                  + */ + SnapshotId: string | undefined; + + /** + *

                                  Specifies the number of days for which to temporarily restore an archived snapshot. + * Required for temporary restores only. The snapshot will be automatically re-archived + * after this period.

                                  + *

                                  To temporarily restore an archived snapshot, specify the number of days and omit + * the PermanentRestore parameter or set it to + * false.

                                  + */ + TemporaryRestoreDays?: number; + + /** + *

                                  Indicates whether to permanently restore an archived snapshot. To permanently restore + * an archived snapshot, specify true and omit the + * RestoreSnapshotTierRequest$TemporaryRestoreDays parameter.

                                  + */ + PermanentRestore?: boolean; + + /** + *

                                  Checks whether you have the required permissions for the action, without actually making the request, + * and provides an error response. If you have the required permissions, the error response is DryRunOperation. + * Otherwise, it is UnauthorizedOperation.

                                  + */ + DryRun?: boolean; +} + +export namespace RestoreSnapshotTierRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreSnapshotTierRequest): any => ({ + ...obj, + }); +} + +export interface RestoreSnapshotTierResult { + /** + *

                                  The ID of the snapshot.

                                  + */ + SnapshotId?: string; + + /** + *

                                  The date and time when the snapshot restore process started.

                                  + */ + RestoreStartTime?: Date; + + /** + *

                                  For temporary restores only. The number of days for which the archived snapshot + * is temporarily restored.

                                  + */ + RestoreDuration?: number; + + /** + *

                                  Indicates whether the snapshot is permanently restored. true indicates a permanent + * restore. false indicates a temporary restore.

                                  + */ + IsPermanentRestore?: boolean; +} + +export namespace RestoreSnapshotTierResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RestoreSnapshotTierResult): any => ({ + ...obj, + }); +} + export interface RevokeClientVpnIngressRequest { /** *
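// Editor's note: a hedged sketch of the two restore modes described above. Not part of the
// generated client; the 30-day window and snapshot IDs are placeholders.
import { EC2Client, RestoreSnapshotTierCommand } from "@aws-sdk/client-ec2";

const client = new EC2Client({ region: "us-east-1" });

// Temporary restore: pass a day count and leave PermanentRestore unset (or false).
// The snapshot is re-archived automatically when the window elapses.
async function restoreTemporarily(snapshotId: string) {
  return client.send(
    new RestoreSnapshotTierCommand({ SnapshotId: snapshotId, TemporaryRestoreDays: 30 })
  );
}

// Permanent restore: set PermanentRestore and omit TemporaryRestoreDays. The result's
// IsPermanentRestore flag and RestoreStartTime confirm what happened.
async function restorePermanently(snapshotId: string) {
  return client.send(
    new RestoreSnapshotTierCommand({ SnapshotId: snapshotId, PermanentRestore: true })
  );
}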

                                  The ID of the Client VPN endpoint with which the authorization rule is associated.

                                  diff --git a/clients/client-ec2/src/pagination/DescribeSnapshotTierStatusPaginator.ts b/clients/client-ec2/src/pagination/DescribeSnapshotTierStatusPaginator.ts new file mode 100644 index 000000000000..7a339b600091 --- /dev/null +++ b/clients/client-ec2/src/pagination/DescribeSnapshotTierStatusPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + DescribeSnapshotTierStatusCommand, + DescribeSnapshotTierStatusCommandInput, + DescribeSnapshotTierStatusCommandOutput, +} from "../commands/DescribeSnapshotTierStatusCommand"; +import { EC2 } from "../EC2"; +import { EC2Client } from "../EC2Client"; +import { EC2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: EC2Client, + input: DescribeSnapshotTierStatusCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new DescribeSnapshotTierStatusCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: EC2, + input: DescribeSnapshotTierStatusCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.describeSnapshotTierStatus(input, ...args); +}; +export async function* paginateDescribeSnapshotTierStatus( + config: EC2PaginationConfiguration, + input: DescribeSnapshotTierStatusCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: DescribeSnapshotTierStatusCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof EC2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof EC2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected EC2 | EC2Client"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-ec2/src/pagination/ListSnapshotsInRecycleBinPaginator.ts b/clients/client-ec2/src/pagination/ListSnapshotsInRecycleBinPaginator.ts new file mode 100644 index 000000000000..c54466edbd6a --- /dev/null +++ b/clients/client-ec2/src/pagination/ListSnapshotsInRecycleBinPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListSnapshotsInRecycleBinCommand, + ListSnapshotsInRecycleBinCommandInput, + ListSnapshotsInRecycleBinCommandOutput, +} from "../commands/ListSnapshotsInRecycleBinCommand"; +import { EC2 } from "../EC2"; +import { EC2Client } from "../EC2Client"; +import { EC2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: EC2Client, + input: ListSnapshotsInRecycleBinCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListSnapshotsInRecycleBinCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: EC2, + input: ListSnapshotsInRecycleBinCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listSnapshotsInRecycleBin(input, ...args); +}; +export async function* paginateListSnapshotsInRecycleBin( + config: EC2PaginationConfiguration, + input: ListSnapshotsInRecycleBinCommandInput, + ...additionalArguments: any 
+): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListSnapshotsInRecycleBinCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof EC2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof EC2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected EC2 | EC2Client"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-ec2/src/pagination/index.ts b/clients/client-ec2/src/pagination/index.ts index f3a141e05459..fd2206cda152 100644 --- a/clients/client-ec2/src/pagination/index.ts +++ b/clients/client-ec2/src/pagination/index.ts @@ -59,6 +59,7 @@ export * from "./DescribeScheduledInstanceAvailabilityPaginator"; export * from "./DescribeScheduledInstancesPaginator"; export * from "./DescribeSecurityGroupRulesPaginator"; export * from "./DescribeSecurityGroupsPaginator"; +export * from "./DescribeSnapshotTierStatusPaginator"; export * from "./DescribeSnapshotsPaginator"; export * from "./DescribeSpotFleetRequestsPaginator"; export * from "./DescribeSpotInstanceRequestsPaginator"; @@ -102,5 +103,6 @@ export * from "./GetTransitGatewayPrefixListReferencesPaginator"; export * from "./GetTransitGatewayRouteTableAssociationsPaginator"; export * from "./GetTransitGatewayRouteTablePropagationsPaginator"; export * from "./GetVpnConnectionDeviceTypesPaginator"; +export * from "./ListSnapshotsInRecycleBinPaginator"; export * from "./SearchLocalGatewayRoutesPaginator"; export * from "./SearchTransitGatewayMulticastGroupsPaginator"; diff --git a/clients/client-ec2/src/protocols/Aws_ec2.ts b/clients/client-ec2/src/protocols/Aws_ec2.ts index 4ffb3b2b6ae7..1293e7d0c68b 100644 --- a/clients/client-ec2/src/protocols/Aws_ec2.ts +++ b/clients/client-ec2/src/protocols/Aws_ec2.ts @@ -871,6 +871,10 @@ import { DescribeSnapshotAttributeCommandOutput, } from "../commands/DescribeSnapshotAttributeCommand"; import { DescribeSnapshotsCommandInput, DescribeSnapshotsCommandOutput } from "../commands/DescribeSnapshotsCommand"; +import { + DescribeSnapshotTierStatusCommandInput, + DescribeSnapshotTierStatusCommandOutput, +} from "../commands/DescribeSnapshotTierStatusCommand"; import { DescribeSpotDatafeedSubscriptionCommandInput, DescribeSpotDatafeedSubscriptionCommandOutput, @@ -1263,6 +1267,10 @@ import { ImportInstanceCommandInput, ImportInstanceCommandOutput } from "../comm import { ImportKeyPairCommandInput, ImportKeyPairCommandOutput } from "../commands/ImportKeyPairCommand"; import { ImportSnapshotCommandInput, ImportSnapshotCommandOutput } from "../commands/ImportSnapshotCommand"; import { ImportVolumeCommandInput, ImportVolumeCommandOutput } from "../commands/ImportVolumeCommand"; +import { + ListSnapshotsInRecycleBinCommandInput, + ListSnapshotsInRecycleBinCommandOutput, +} from "../commands/ListSnapshotsInRecycleBinCommand"; import { ModifyAddressAttributeCommandInput, ModifyAddressAttributeCommandOutput, @@ -1362,6 +1370,7 @@ import { ModifySnapshotAttributeCommandInput, ModifySnapshotAttributeCommandOutput, } from "../commands/ModifySnapshotAttributeCommand"; +import { ModifySnapshotTierCommandInput, ModifySnapshotTierCommandOutput } from 
"../commands/ModifySnapshotTierCommand"; import { ModifySpotFleetRequestCommandInput, ModifySpotFleetRequestCommandOutput, @@ -1551,6 +1560,14 @@ import { RestoreManagedPrefixListVersionCommandInput, RestoreManagedPrefixListVersionCommandOutput, } from "../commands/RestoreManagedPrefixListVersionCommand"; +import { + RestoreSnapshotFromRecycleBinCommandInput, + RestoreSnapshotFromRecycleBinCommandOutput, +} from "../commands/RestoreSnapshotFromRecycleBinCommand"; +import { + RestoreSnapshotTierCommandInput, + RestoreSnapshotTierCommandOutput, +} from "../commands/RestoreSnapshotTierCommand"; import { RevokeClientVpnIngressCommandInput, RevokeClientVpnIngressCommandOutput, @@ -1947,7 +1964,6 @@ import { CreateTransitGatewayVpcAttachmentRequestOptions, CreateTransitGatewayVpcAttachmentResult, CreateVolumeRequest, - CreateVpcEndpointConnectionNotificationRequest, CreateVpcEndpointRequest, CreateVpcEndpointResult, CreateVpcRequest, @@ -2106,6 +2122,7 @@ import { ConnectionLogResponseOptions, ConnectionNotification, ConversionTask, + CreateVpcEndpointConnectionNotificationRequest, CreateVpcEndpointConnectionNotificationResult, CreateVpcEndpointServiceConfigurationRequest, CreateVpcEndpointServiceConfigurationResult, @@ -2291,7 +2308,6 @@ import { DescribeFpgaImagesResult, DescribeHostReservationOfferingsRequest, DescribeHostReservationOfferingsResult, - DescribeHostReservationsRequest, DestinationOptionsResponse, DirectoryServiceAuthentication, DiskImageDescription, @@ -2374,6 +2390,7 @@ import { ClassicLoadBalancersConfig, CpuOptions, CreateVolumePermission, + DescribeHostReservationsRequest, DescribeHostReservationsResult, DescribeHostsRequest, DescribeHostsResult, @@ -2481,6 +2498,8 @@ import { DescribeSnapshotAttributeResult, DescribeSnapshotsRequest, DescribeSnapshotsResult, + DescribeSnapshotTierStatusRequest, + DescribeSnapshotTierStatusResult, DescribeSpotDatafeedSubscriptionRequest, DescribeSpotDatafeedSubscriptionResult, DescribeSpotFleetInstancesRequest, @@ -2492,8 +2511,6 @@ import { DescribeSpotInstanceRequestsRequest, DescribeSpotInstanceRequestsResult, DescribeSpotPriceHistoryRequest, - DescribeSpotPriceHistoryResult, - DescribeStaleSecurityGroupsRequest, DiskInfo, EbsInfo, EbsInstanceBlockDevice, @@ -2594,6 +2611,7 @@ import { SlotStartTimeRangeRequest, SnapshotDetail, SnapshotTaskDetail, + SnapshotTierStatus, SpotCapacityRebalance, SpotFleetLaunchSpecification, SpotFleetMonitoring, @@ -2604,9 +2622,6 @@ import { SpotInstanceStatus, SpotMaintenanceStrategies, SpotPlacement, - SpotPrice, - StaleIpPermission, - StaleSecurityGroup, StateReason, TargetGroup, TargetGroupsConfig, @@ -2618,12 +2633,13 @@ import { import { AssociatedRole, AthenaIntegration, - BlobAttributeValue, CapacityReservationGroup, ClassicLinkDnsSupport, ClientCertificateRevocationListStatus, ClientData, CoipAddressUsage, + DescribeSpotPriceHistoryResult, + DescribeStaleSecurityGroupsRequest, DescribeStaleSecurityGroupsResult, DescribeStoreImageTasksRequest, DescribeStoreImageTasksResult, @@ -2737,7 +2753,6 @@ import { DiskImage, DiskImageDetail, DnsServersOptionsModifyStructure, - EbsInstanceBlockDeviceSpecification, EnableEbsEncryptionByDefaultRequest, EnableEbsEncryptionByDefaultResult, EnableFastSnapshotRestoreErrorItem, @@ -2838,7 +2853,6 @@ import { ImportSnapshotResult, ImportVolumeRequest, ImportVolumeResult, - InstanceBlockDeviceMappingSpecification, InstanceEventWindowDisassociationRequest, InstanceFamilyCreditSpecification, InstanceRequirementsWithMetadataRequest, @@ -2846,7 +2860,8 @@ import { 
InstanceUsage, IntegrateServices, Ipv6CidrAssociation, - LaunchPermissionModifications, + ListSnapshotsInRecycleBinRequest, + ListSnapshotsInRecycleBinResult, LoadPermissionModifications, LoadPermissionRequest, ModifyAddressAttributeRequest, @@ -2868,10 +2883,6 @@ import { ModifyFpgaImageAttributeRequest, ModifyFpgaImageAttributeResult, ModifyHostsRequest, - ModifyHostsResult, - ModifyIdentityIdFormatRequest, - ModifyIdFormatRequest, - ModifyImageAttributeRequest, PrefixListAssociation, PrefixListEntry, PrivateDnsDetails, @@ -2880,7 +2891,11 @@ import { ReservedInstanceReservationValue, ServiceDetail, SnapshotDiskContainer, + SnapshotRecycleBinInfo, SpotPlacementScore, + SpotPrice, + StaleIpPermission, + StaleSecurityGroup, StoreImageTaskResult, TagDescription, TargetConfiguration, @@ -2907,20 +2922,28 @@ import { VpnConnectionDeviceType, } from "../models/models_4"; import { + BlobAttributeValue, CapacityReservationSpecification, CidrAuthorizationContext, CpuOptionsRequest, CreateVolumePermissionModifications, + EbsInstanceBlockDeviceSpecification, ElasticInferenceAccelerator, EnclaveOptionsRequest, HibernationOptionsRequest, + InstanceBlockDeviceMappingSpecification, InstanceCreditSpecificationRequest, InstanceMarketOptionsRequest, InstanceMetadataOptionsRequest, InstanceMonitoring, InstanceStateChange, + LaunchPermissionModifications, LaunchTemplateSpecification, LicenseConfigurationRequest, + ModifyHostsResult, + ModifyIdentityIdFormatRequest, + ModifyIdFormatRequest, + ModifyImageAttributeRequest, ModifyInstanceAttributeRequest, ModifyInstanceCapacityReservationAttributesRequest, ModifyInstanceCapacityReservationAttributesResult, @@ -2946,6 +2969,8 @@ import { ModifySecurityGroupRulesRequest, ModifySecurityGroupRulesResult, ModifySnapshotAttributeRequest, + ModifySnapshotTierRequest, + ModifySnapshotTierResult, ModifySpotFleetRequestRequest, ModifySpotFleetRequestResponse, ModifySubnetAttributeRequest, @@ -3061,6 +3086,10 @@ import { RestoreAddressToClassicResult, RestoreManagedPrefixListVersionRequest, RestoreManagedPrefixListVersionResult, + RestoreSnapshotFromRecycleBinRequest, + RestoreSnapshotFromRecycleBinResult, + RestoreSnapshotTierRequest, + RestoreSnapshotTierResult, RevokeClientVpnIngressRequest, RevokeClientVpnIngressResult, RevokeSecurityGroupEgressRequest, @@ -7268,6 +7297,22 @@ export const serializeAws_ec2DescribeSnapshotsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_ec2DescribeSnapshotTierStatusCommand = async ( + input: DescribeSnapshotTierStatusCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-www-form-urlencoded", + }; + let body: any; + body = buildFormUrlencodedString({ + ...serializeAws_ec2DescribeSnapshotTierStatusRequest(input, context), + Action: "DescribeSnapshotTierStatus", + Version: "2016-11-15", + }); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_ec2DescribeSpotDatafeedSubscriptionCommand = async ( input: DescribeSpotDatafeedSubscriptionCommandInput, context: __SerdeContext @@ -9028,6 +9073,22 @@ export const serializeAws_ec2ImportVolumeCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_ec2ListSnapshotsInRecycleBinCommand = async ( + input: ListSnapshotsInRecycleBinCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + 
"content-type": "application/x-www-form-urlencoded", + }; + let body: any; + body = buildFormUrlencodedString({ + ...serializeAws_ec2ListSnapshotsInRecycleBinRequest(input, context), + Action: "ListSnapshotsInRecycleBin", + Version: "2016-11-15", + }); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_ec2ModifyAddressAttributeCommand = async ( input: ModifyAddressAttributeCommandInput, context: __SerdeContext @@ -9460,6 +9521,22 @@ export const serializeAws_ec2ModifySnapshotAttributeCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_ec2ModifySnapshotTierCommand = async ( + input: ModifySnapshotTierCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-www-form-urlencoded", + }; + let body: any; + body = buildFormUrlencodedString({ + ...serializeAws_ec2ModifySnapshotTierRequest(input, context), + Action: "ModifySnapshotTier", + Version: "2016-11-15", + }); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_ec2ModifySpotFleetRequestCommand = async ( input: ModifySpotFleetRequestCommandInput, context: __SerdeContext @@ -10372,6 +10449,38 @@ export const serializeAws_ec2RestoreManagedPrefixListVersionCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_ec2RestoreSnapshotFromRecycleBinCommand = async ( + input: RestoreSnapshotFromRecycleBinCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-www-form-urlencoded", + }; + let body: any; + body = buildFormUrlencodedString({ + ...serializeAws_ec2RestoreSnapshotFromRecycleBinRequest(input, context), + Action: "RestoreSnapshotFromRecycleBin", + Version: "2016-11-15", + }); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_ec2RestoreSnapshotTierCommand = async ( + input: RestoreSnapshotTierCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-www-form-urlencoded", + }; + let body: any; + body = buildFormUrlencodedString({ + ...serializeAws_ec2RestoreSnapshotTierRequest(input, context), + Action: "RestoreSnapshotTier", + Version: "2016-11-15", + }); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_ec2RevokeClientVpnIngressCommand = async ( input: RevokeClientVpnIngressCommandInput, context: __SerdeContext @@ -22541,6 +22650,52 @@ const deserializeAws_ec2DescribeSnapshotsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_ec2DescribeSnapshotTierStatusCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_ec2DescribeSnapshotTierStatusCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_ec2DescribeSnapshotTierStatusResult(data, context); + const response: DescribeSnapshotTierStatusCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_ec2DescribeSnapshotTierStatusCommandError = async ( + output: __HttpResponse, + context: __SerdeContext 
+): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadEc2ErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.Errors.Error.code || parsedBody.Errors.Error.Code || errorCode; + response = { + ...parsedBody.Errors.Error, + name: `${errorCode}`, + message: parsedBody.Errors.Error.message || parsedBody.Errors.Error.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_ec2DescribeSpotDatafeedSubscriptionCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -27577,6 +27732,52 @@ const deserializeAws_ec2ImportVolumeCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_ec2ListSnapshotsInRecycleBinCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_ec2ListSnapshotsInRecycleBinCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_ec2ListSnapshotsInRecycleBinResult(data, context); + const response: ListSnapshotsInRecycleBinCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_ec2ListSnapshotsInRecycleBinCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadEc2ErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.Errors.Error.code || parsedBody.Errors.Error.Code || errorCode; + response = { + ...parsedBody.Errors.Error, + name: `${errorCode}`, + message: parsedBody.Errors.Error.message || parsedBody.Errors.Error.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_ec2ModifyAddressAttributeCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -28801,6 +29002,52 @@ const deserializeAws_ec2ModifySnapshotAttributeCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_ec2ModifySnapshotTierCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_ec2ModifySnapshotTierCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_ec2ModifySnapshotTierResult(data, context); + const response: ModifySnapshotTierCommandOutput = { + $metadata: 
deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_ec2ModifySnapshotTierCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadEc2ErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.Errors.Error.code || parsedBody.Errors.Error.Code || errorCode; + response = { + ...parsedBody.Errors.Error, + name: `${errorCode}`, + message: parsedBody.Errors.Error.message || parsedBody.Errors.Error.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_ec2ModifySpotFleetRequestCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -31387,27 +31634,27 @@ const deserializeAws_ec2RestoreManagedPrefixListVersionCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_ec2RevokeClientVpnIngressCommand = async ( +export const deserializeAws_ec2RestoreSnapshotFromRecycleBinCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_ec2RevokeClientVpnIngressCommandError(output, context); + return deserializeAws_ec2RestoreSnapshotFromRecycleBinCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_ec2RevokeClientVpnIngressResult(data, context); - const response: RevokeClientVpnIngressCommandOutput = { + contents = deserializeAws_ec2RestoreSnapshotFromRecycleBinResult(data, context); + const response: RestoreSnapshotFromRecycleBinCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_ec2RevokeClientVpnIngressCommandError = async ( +const deserializeAws_ec2RestoreSnapshotFromRecycleBinCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -31433,27 +31680,27 @@ const deserializeAws_ec2RevokeClientVpnIngressCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_ec2RevokeSecurityGroupEgressCommand = async ( +export const deserializeAws_ec2RestoreSnapshotTierCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_ec2RevokeSecurityGroupEgressCommandError(output, context); + return deserializeAws_ec2RestoreSnapshotTierCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_ec2RevokeSecurityGroupEgressResult(data, context); - const response: RevokeSecurityGroupEgressCommandOutput = { + contents = deserializeAws_ec2RestoreSnapshotTierResult(data, context); + const response: RestoreSnapshotTierCommandOutput = { $metadata: 
deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_ec2RevokeSecurityGroupEgressCommandError = async ( +const deserializeAws_ec2RestoreSnapshotTierCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -31479,27 +31726,27 @@ const deserializeAws_ec2RevokeSecurityGroupEgressCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_ec2RevokeSecurityGroupIngressCommand = async ( +export const deserializeAws_ec2RevokeClientVpnIngressCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_ec2RevokeSecurityGroupIngressCommandError(output, context); + return deserializeAws_ec2RevokeClientVpnIngressCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_ec2RevokeSecurityGroupIngressResult(data, context); - const response: RevokeSecurityGroupIngressCommandOutput = { + contents = deserializeAws_ec2RevokeClientVpnIngressResult(data, context); + const response: RevokeClientVpnIngressCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_ec2RevokeSecurityGroupIngressCommandError = async ( +const deserializeAws_ec2RevokeClientVpnIngressCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -31525,27 +31772,27 @@ const deserializeAws_ec2RevokeSecurityGroupIngressCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_ec2RunInstancesCommand = async ( +export const deserializeAws_ec2RevokeSecurityGroupEgressCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_ec2RunInstancesCommandError(output, context); + return deserializeAws_ec2RevokeSecurityGroupEgressCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_ec2Reservation(data, context); - const response: RunInstancesCommandOutput = { + contents = deserializeAws_ec2RevokeSecurityGroupEgressResult(data, context); + const response: RevokeSecurityGroupEgressCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_ec2RunInstancesCommandError = async ( +const deserializeAws_ec2RevokeSecurityGroupEgressCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -31571,27 +31818,119 @@ const deserializeAws_ec2RunInstancesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_ec2RunScheduledInstancesCommand = async ( +export const deserializeAws_ec2RevokeSecurityGroupIngressCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_ec2RunScheduledInstancesCommandError(output, context); + return 
deserializeAws_ec2RevokeSecurityGroupIngressCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_ec2RunScheduledInstancesResult(data, context); - const response: RunScheduledInstancesCommandOutput = { + contents = deserializeAws_ec2RevokeSecurityGroupIngressResult(data, context); + const response: RevokeSecurityGroupIngressCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_ec2RunScheduledInstancesCommandError = async ( +const deserializeAws_ec2RevokeSecurityGroupIngressCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadEc2ErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.Errors.Error.code || parsedBody.Errors.Error.Code || errorCode; + response = { + ...parsedBody.Errors.Error, + name: `${errorCode}`, + message: parsedBody.Errors.Error.message || parsedBody.Errors.Error.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_ec2RunInstancesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_ec2RunInstancesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_ec2Reservation(data, context); + const response: RunInstancesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_ec2RunInstancesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadEc2ErrorCode(output, parsedOutput.body); + switch (errorCode) { + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.Errors.Error.code || parsedBody.Errors.Error.Code || errorCode; + response = { + ...parsedBody.Errors.Error, + name: `${errorCode}`, + message: parsedBody.Errors.Error.message || parsedBody.Errors.Error.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_ec2RunScheduledInstancesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_ec2RunScheduledInstancesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = 
deserializeAws_ec2RunScheduledInstancesResult(data, context); + const response: RunScheduledInstancesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_ec2RunScheduledInstancesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -39807,6 +40146,30 @@ const serializeAws_ec2DescribeSnapshotsRequest = (input: DescribeSnapshotsReques return entries; }; +const serializeAws_ec2DescribeSnapshotTierStatusRequest = ( + input: DescribeSnapshotTierStatusRequest, + context: __SerdeContext +): any => { + const entries: any = {}; + if (input.Filters !== undefined && input.Filters !== null) { + const memberEntries = serializeAws_ec2FilterList(input.Filters, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `Filter.${key.substring(key.indexOf(".") + 1)}`; + entries[loc] = value; + }); + } + if (input.DryRun !== undefined && input.DryRun !== null) { + entries["DryRun"] = input.DryRun; + } + if (input.NextToken !== undefined && input.NextToken !== null) { + entries["NextToken"] = input.NextToken; + } + if (input.MaxResults !== undefined && input.MaxResults !== null) { + entries["MaxResults"] = input.MaxResults; + } + return entries; +}; + const serializeAws_ec2DescribeSpotDatafeedSubscriptionRequest = ( input: DescribeSpotDatafeedSubscriptionRequest, context: __SerdeContext @@ -45001,6 +45364,30 @@ const serializeAws_ec2LicenseSpecificationListRequest = ( return entries; }; +const serializeAws_ec2ListSnapshotsInRecycleBinRequest = ( + input: ListSnapshotsInRecycleBinRequest, + context: __SerdeContext +): any => { + const entries: any = {}; + if (input.MaxResults !== undefined && input.MaxResults !== null) { + entries["MaxResults"] = input.MaxResults; + } + if (input.NextToken !== undefined && input.NextToken !== null) { + entries["NextToken"] = input.NextToken; + } + if (input.SnapshotIds !== undefined && input.SnapshotIds !== null) { + const memberEntries = serializeAws_ec2SnapshotIdStringList(input.SnapshotIds, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `SnapshotId.${key.substring(key.indexOf(".") + 1)}`; + entries[loc] = value; + }); + } + if (input.DryRun !== undefined && input.DryRun !== null) { + entries["DryRun"] = input.DryRun; + } + return entries; +}; + const serializeAws_ec2LoadBalancersConfig = (input: LoadBalancersConfig, context: __SerdeContext): any => { const entries: any = {}; if (input.ClassicLoadBalancersConfig !== undefined && input.ClassicLoadBalancersConfig !== null) { @@ -46065,6 +46452,20 @@ const serializeAws_ec2ModifySnapshotAttributeRequest = ( return entries; }; +const serializeAws_ec2ModifySnapshotTierRequest = (input: ModifySnapshotTierRequest, context: __SerdeContext): any => { + const entries: any = {}; + if (input.SnapshotId !== undefined && input.SnapshotId !== null) { + entries["SnapshotId"] = input.SnapshotId; + } + if (input.StorageTier !== undefined && input.StorageTier !== null) { + entries["StorageTier"] = input.StorageTier; + } + if (input.DryRun !== undefined && input.DryRun !== null) { + entries["DryRun"] = input.DryRun; + } + return entries; +}; + const serializeAws_ec2ModifySpotFleetRequestRequest = ( input: ModifySpotFleetRequestRequest, context: __SerdeContext @@ -46154,6 +46555,16 @@ const serializeAws_ec2ModifySubnetAttributeRequest = ( entries[loc] = value; }); } + if 
(input.EnableLniAtDeviceIndex !== undefined && input.EnableLniAtDeviceIndex !== null) { + entries["EnableLniAtDeviceIndex"] = input.EnableLniAtDeviceIndex; + } + if (input.DisableLniAtDeviceIndex !== undefined && input.DisableLniAtDeviceIndex !== null) { + const memberEntries = serializeAws_ec2AttributeBooleanValue(input.DisableLniAtDeviceIndex, context); + Object.entries(memberEntries).forEach(([key, value]) => { + const loc = `DisableLniAtDeviceIndex.${key}`; + entries[loc] = value; + }); + } return entries; }; @@ -49109,6 +49520,40 @@ const serializeAws_ec2RestoreManagedPrefixListVersionRequest = ( return entries; }; +const serializeAws_ec2RestoreSnapshotFromRecycleBinRequest = ( + input: RestoreSnapshotFromRecycleBinRequest, + context: __SerdeContext +): any => { + const entries: any = {}; + if (input.SnapshotId !== undefined && input.SnapshotId !== null) { + entries["SnapshotId"] = input.SnapshotId; + } + if (input.DryRun !== undefined && input.DryRun !== null) { + entries["DryRun"] = input.DryRun; + } + return entries; +}; + +const serializeAws_ec2RestoreSnapshotTierRequest = ( + input: RestoreSnapshotTierRequest, + context: __SerdeContext +): any => { + const entries: any = {}; + if (input.SnapshotId !== undefined && input.SnapshotId !== null) { + entries["SnapshotId"] = input.SnapshotId; + } + if (input.TemporaryRestoreDays !== undefined && input.TemporaryRestoreDays !== null) { + entries["TemporaryRestoreDays"] = input.TemporaryRestoreDays; + } + if (input.PermanentRestore !== undefined && input.PermanentRestore !== null) { + entries["PermanentRestore"] = input.PermanentRestore; + } + if (input.DryRun !== undefined && input.DryRun !== null) { + entries["DryRun"] = input.DryRun; + } + return entries; +}; + const serializeAws_ec2RevokeClientVpnIngressRequest = ( input: RevokeClientVpnIngressRequest, context: __SerdeContext @@ -58693,6 +59138,29 @@ const deserializeAws_ec2DescribeSnapshotsResult = (output: any, context: __Serde return contents; }; +const deserializeAws_ec2DescribeSnapshotTierStatusResult = ( + output: any, + context: __SerdeContext +): DescribeSnapshotTierStatusResult => { + const contents: any = { + SnapshotTierStatuses: undefined, + NextToken: undefined, + }; + if (output.snapshotTierStatusSet === "") { + contents.SnapshotTierStatuses = []; + } + if (output["snapshotTierStatusSet"] !== undefined && output["snapshotTierStatusSet"]["item"] !== undefined) { + contents.SnapshotTierStatuses = deserializeAws_ec2snapshotTierStatusSet( + __getArrayIfSingleItem(output["snapshotTierStatusSet"]["item"]), + context + ); + } + if (output["nextToken"] !== undefined) { + contents.NextToken = __expectString(output["nextToken"]); + } + return contents; +}; + const deserializeAws_ec2DescribeSpotDatafeedSubscriptionResult = ( output: any, context: __SerdeContext @@ -67193,6 +67661,29 @@ const deserializeAws_ec2LicenseList = (output: any, context: __SerdeContext): Li }); }; +const deserializeAws_ec2ListSnapshotsInRecycleBinResult = ( + output: any, + context: __SerdeContext +): ListSnapshotsInRecycleBinResult => { + const contents: any = { + Snapshots: undefined, + NextToken: undefined, + }; + if (output.snapshotSet === "") { + contents.Snapshots = []; + } + if (output["snapshotSet"] !== undefined && output["snapshotSet"]["item"] !== undefined) { + contents.Snapshots = deserializeAws_ec2SnapshotRecycleBinInfoList( + __getArrayIfSingleItem(output["snapshotSet"]["item"]), + context + ); + } + if (output["nextToken"] !== undefined) { + contents.NextToken = 
__expectString(output["nextToken"]); + } + return contents; +}; + const deserializeAws_ec2LoadBalancersConfig = (output: any, context: __SerdeContext): LoadBalancersConfig => { const contents: any = { ClassicLoadBalancersConfig: undefined, @@ -68041,6 +68532,20 @@ const deserializeAws_ec2ModifySecurityGroupRulesResult = ( return contents; }; +const deserializeAws_ec2ModifySnapshotTierResult = (output: any, context: __SerdeContext): ModifySnapshotTierResult => { + const contents: any = { + SnapshotId: undefined, + TieringStartTime: undefined, + }; + if (output["snapshotId"] !== undefined) { + contents.SnapshotId = __expectString(output["snapshotId"]); + } + if (output["tieringStartTime"] !== undefined) { + contents.TieringStartTime = __expectNonNull(__parseRfc3339DateTime(output["tieringStartTime"])); + } + return contents; +}; + const deserializeAws_ec2ModifySpotFleetRequestResponse = ( output: any, context: __SerdeContext @@ -71596,6 +72101,80 @@ const deserializeAws_ec2RestoreManagedPrefixListVersionResult = ( return contents; }; +const deserializeAws_ec2RestoreSnapshotFromRecycleBinResult = ( + output: any, + context: __SerdeContext +): RestoreSnapshotFromRecycleBinResult => { + const contents: any = { + SnapshotId: undefined, + OutpostArn: undefined, + Description: undefined, + Encrypted: undefined, + OwnerId: undefined, + Progress: undefined, + StartTime: undefined, + State: undefined, + VolumeId: undefined, + VolumeSize: undefined, + }; + if (output["snapshotId"] !== undefined) { + contents.SnapshotId = __expectString(output["snapshotId"]); + } + if (output["outpostArn"] !== undefined) { + contents.OutpostArn = __expectString(output["outpostArn"]); + } + if (output["description"] !== undefined) { + contents.Description = __expectString(output["description"]); + } + if (output["encrypted"] !== undefined) { + contents.Encrypted = __parseBoolean(output["encrypted"]); + } + if (output["ownerId"] !== undefined) { + contents.OwnerId = __expectString(output["ownerId"]); + } + if (output["progress"] !== undefined) { + contents.Progress = __expectString(output["progress"]); + } + if (output["startTime"] !== undefined) { + contents.StartTime = __expectNonNull(__parseRfc3339DateTime(output["startTime"])); + } + if (output["status"] !== undefined) { + contents.State = __expectString(output["status"]); + } + if (output["volumeId"] !== undefined) { + contents.VolumeId = __expectString(output["volumeId"]); + } + if (output["volumeSize"] !== undefined) { + contents.VolumeSize = __strictParseInt32(output["volumeSize"]) as number; + } + return contents; +}; + +const deserializeAws_ec2RestoreSnapshotTierResult = ( + output: any, + context: __SerdeContext +): RestoreSnapshotTierResult => { + const contents: any = { + SnapshotId: undefined, + RestoreStartTime: undefined, + RestoreDuration: undefined, + IsPermanentRestore: undefined, + }; + if (output["snapshotId"] !== undefined) { + contents.SnapshotId = __expectString(output["snapshotId"]); + } + if (output["restoreStartTime"] !== undefined) { + contents.RestoreStartTime = __expectNonNull(__parseRfc3339DateTime(output["restoreStartTime"])); + } + if (output["restoreDuration"] !== undefined) { + contents.RestoreDuration = __strictParseInt32(output["restoreDuration"]) as number; + } + if (output["isPermanentRestore"] !== undefined) { + contents.IsPermanentRestore = __parseBoolean(output["isPermanentRestore"]); + } + return contents; +}; + const deserializeAws_ec2RevokeClientVpnIngressResult = ( output: any, context: __SerdeContext @@ -72611,6 +73190,8 @@ 
const deserializeAws_ec2Snapshot = (output: any, context: __SerdeContext): Snaps OwnerAlias: undefined, OutpostArn: undefined, Tags: undefined, + StorageTier: undefined, + RestoreExpiryTime: undefined, }; if (output["dataEncryptionKeyId"] !== undefined) { contents.DataEncryptionKeyId = __expectString(output["dataEncryptionKeyId"]); @@ -72660,6 +73241,12 @@ const deserializeAws_ec2Snapshot = (output: any, context: __SerdeContext): Snaps if (output["tagSet"] !== undefined && output["tagSet"]["item"] !== undefined) { contents.Tags = deserializeAws_ec2TagList(__getArrayIfSingleItem(output["tagSet"]["item"]), context); } + if (output["storageTier"] !== undefined) { + contents.StorageTier = __expectString(output["storageTier"]); + } + if (output["restoreExpiryTime"] !== undefined) { + contents.RestoreExpiryTime = __expectNonNull(__parseRfc3339DateTime(output["restoreExpiryTime"])); + } return contents; }; @@ -72784,6 +73371,46 @@ const deserializeAws_ec2SnapshotList = (output: any, context: __SerdeContext): S }); }; +const deserializeAws_ec2SnapshotRecycleBinInfo = (output: any, context: __SerdeContext): SnapshotRecycleBinInfo => { + const contents: any = { + SnapshotId: undefined, + RecycleBinEnterTime: undefined, + RecycleBinExitTime: undefined, + Description: undefined, + VolumeId: undefined, + }; + if (output["snapshotId"] !== undefined) { + contents.SnapshotId = __expectString(output["snapshotId"]); + } + if (output["recycleBinEnterTime"] !== undefined) { + contents.RecycleBinEnterTime = __expectNonNull(__parseRfc3339DateTime(output["recycleBinEnterTime"])); + } + if (output["recycleBinExitTime"] !== undefined) { + contents.RecycleBinExitTime = __expectNonNull(__parseRfc3339DateTime(output["recycleBinExitTime"])); + } + if (output["description"] !== undefined) { + contents.Description = __expectString(output["description"]); + } + if (output["volumeId"] !== undefined) { + contents.VolumeId = __expectString(output["volumeId"]); + } + return contents; +}; + +const deserializeAws_ec2SnapshotRecycleBinInfoList = ( + output: any, + context: __SerdeContext +): SnapshotRecycleBinInfo[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_ec2SnapshotRecycleBinInfo(entry, context); + }); +}; + const deserializeAws_ec2SnapshotSet = (output: any, context: __SerdeContext): SnapshotInfo[] => { return (output || []) .filter((e: any) => e != null) @@ -72845,6 +73472,74 @@ const deserializeAws_ec2SnapshotTaskDetail = (output: any, context: __SerdeConte return contents; }; +const deserializeAws_ec2SnapshotTierStatus = (output: any, context: __SerdeContext): SnapshotTierStatus => { + const contents: any = { + SnapshotId: undefined, + VolumeId: undefined, + Status: undefined, + OwnerId: undefined, + Tags: undefined, + StorageTier: undefined, + LastTieringStartTime: undefined, + LastTieringProgress: undefined, + LastTieringOperationStatus: undefined, + LastTieringOperationStatusDetail: undefined, + ArchivalCompleteTime: undefined, + RestoreExpiryTime: undefined, + }; + if (output["snapshotId"] !== undefined) { + contents.SnapshotId = __expectString(output["snapshotId"]); + } + if (output["volumeId"] !== undefined) { + contents.VolumeId = __expectString(output["volumeId"]); + } + if (output["status"] !== undefined) { + contents.Status = __expectString(output["status"]); + } + if (output["ownerId"] !== undefined) { + contents.OwnerId = __expectString(output["ownerId"]); + } + if (output.tagSet === "") { + 
contents.Tags = []; + } + if (output["tagSet"] !== undefined && output["tagSet"]["item"] !== undefined) { + contents.Tags = deserializeAws_ec2TagList(__getArrayIfSingleItem(output["tagSet"]["item"]), context); + } + if (output["storageTier"] !== undefined) { + contents.StorageTier = __expectString(output["storageTier"]); + } + if (output["lastTieringStartTime"] !== undefined) { + contents.LastTieringStartTime = __expectNonNull(__parseRfc3339DateTime(output["lastTieringStartTime"])); + } + if (output["lastTieringProgress"] !== undefined) { + contents.LastTieringProgress = __strictParseInt32(output["lastTieringProgress"]) as number; + } + if (output["lastTieringOperationStatus"] !== undefined) { + contents.LastTieringOperationStatus = __expectString(output["lastTieringOperationStatus"]); + } + if (output["lastTieringOperationStatusDetail"] !== undefined) { + contents.LastTieringOperationStatusDetail = __expectString(output["lastTieringOperationStatusDetail"]); + } + if (output["archivalCompleteTime"] !== undefined) { + contents.ArchivalCompleteTime = __expectNonNull(__parseRfc3339DateTime(output["archivalCompleteTime"])); + } + if (output["restoreExpiryTime"] !== undefined) { + contents.RestoreExpiryTime = __expectNonNull(__parseRfc3339DateTime(output["restoreExpiryTime"])); + } + return contents; +}; + +const deserializeAws_ec2snapshotTierStatusSet = (output: any, context: __SerdeContext): SnapshotTierStatus[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_ec2SnapshotTierStatus(entry, context); + }); +}; + const deserializeAws_ec2SpotCapacityRebalance = (output: any, context: __SerdeContext): SpotCapacityRebalance => { const contents: any = { ReplacementStrategy: undefined, @@ -73748,6 +74443,7 @@ const deserializeAws_ec2Subnet = (output: any, context: __SerdeContext): Subnet AvailableIpAddressCount: undefined, CidrBlock: undefined, DefaultForAz: undefined, + EnableLniAtDeviceIndex: undefined, MapPublicIpOnLaunch: undefined, MapCustomerOwnedIpOnLaunch: undefined, CustomerOwnedIpv4Pool: undefined, @@ -73779,6 +74475,9 @@ const deserializeAws_ec2Subnet = (output: any, context: __SerdeContext): Subnet if (output["defaultForAz"] !== undefined) { contents.DefaultForAz = __parseBoolean(output["defaultForAz"]); } + if (output["enableLniAtDeviceIndex"] !== undefined) { + contents.EnableLniAtDeviceIndex = __strictParseInt32(output["enableLniAtDeviceIndex"]) as number; + } if (output["mapPublicIpOnLaunch"] !== undefined) { contents.MapPublicIpOnLaunch = __parseBoolean(output["mapPublicIpOnLaunch"]); } diff --git a/clients/client-ecr/src/ECR.ts b/clients/client-ecr/src/ECR.ts index 3780fb535ba7..8498e0c160aa 100644 --- a/clients/client-ecr/src/ECR.ts +++ b/clients/client-ecr/src/ECR.ts @@ -15,11 +15,21 @@ import { BatchGetImageCommandInput, BatchGetImageCommandOutput, } from "./commands/BatchGetImageCommand"; +import { + BatchGetRepositoryScanningConfigurationCommand, + BatchGetRepositoryScanningConfigurationCommandInput, + BatchGetRepositoryScanningConfigurationCommandOutput, +} from "./commands/BatchGetRepositoryScanningConfigurationCommand"; import { CompleteLayerUploadCommand, CompleteLayerUploadCommandInput, CompleteLayerUploadCommandOutput, } from "./commands/CompleteLayerUploadCommand"; +import { + CreatePullThroughCacheRuleCommand, + CreatePullThroughCacheRuleCommandInput, + CreatePullThroughCacheRuleCommandOutput, +} from "./commands/CreatePullThroughCacheRuleCommand"; import { 
CreateRepositoryCommand, CreateRepositoryCommandInput, @@ -30,6 +40,11 @@ import { DeleteLifecyclePolicyCommandInput, DeleteLifecyclePolicyCommandOutput, } from "./commands/DeleteLifecyclePolicyCommand"; +import { + DeletePullThroughCacheRuleCommand, + DeletePullThroughCacheRuleCommandInput, + DeletePullThroughCacheRuleCommandOutput, +} from "./commands/DeletePullThroughCacheRuleCommand"; import { DeleteRegistryPolicyCommand, DeleteRegistryPolicyCommandInput, @@ -60,6 +75,11 @@ import { DescribeImagesCommandInput, DescribeImagesCommandOutput, } from "./commands/DescribeImagesCommand"; +import { + DescribePullThroughCacheRulesCommand, + DescribePullThroughCacheRulesCommandInput, + DescribePullThroughCacheRulesCommandOutput, +} from "./commands/DescribePullThroughCacheRulesCommand"; import { DescribeRegistryCommand, DescribeRegistryCommandInput, @@ -95,6 +115,11 @@ import { GetRegistryPolicyCommandInput, GetRegistryPolicyCommandOutput, } from "./commands/GetRegistryPolicyCommand"; +import { + GetRegistryScanningConfigurationCommand, + GetRegistryScanningConfigurationCommandInput, + GetRegistryScanningConfigurationCommandOutput, +} from "./commands/GetRegistryScanningConfigurationCommand"; import { GetRepositoryPolicyCommand, GetRepositoryPolicyCommandInput, @@ -132,6 +157,11 @@ import { PutRegistryPolicyCommandInput, PutRegistryPolicyCommandOutput, } from "./commands/PutRegistryPolicyCommand"; +import { + PutRegistryScanningConfigurationCommand, + PutRegistryScanningConfigurationCommandInput, + PutRegistryScanningConfigurationCommandOutput, +} from "./commands/PutRegistryScanningConfigurationCommand"; import { PutReplicationConfigurationCommand, PutReplicationConfigurationCommandInput, @@ -288,6 +318,40 @@ export class ECR extends ECRClient { } } + /** + *

Gets the scanning configuration for one or more repositories.
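For example, a minimal sketch of calling this operation through the aggregated client; the region and repository name are assumed placeholders:
```ts
import { ECR } from "@aws-sdk/client-ecr";

const ecr = new ECR({ region: "us-east-1" }); // assumed example region

const { scanningConfigurations = [] } = await ecr.batchGetRepositoryScanningConfiguration({
  repositoryNames: ["my-repo"], // assumed example repository name
});

for (const config of scanningConfigurations) {
  console.log(config.repositoryName, config.scanFrequency, config.scanOnPush);
}
```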

                                  + */ + public batchGetRepositoryScanningConfiguration( + args: BatchGetRepositoryScanningConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public batchGetRepositoryScanningConfiguration( + args: BatchGetRepositoryScanningConfigurationCommandInput, + cb: (err: any, data?: BatchGetRepositoryScanningConfigurationCommandOutput) => void + ): void; + public batchGetRepositoryScanningConfiguration( + args: BatchGetRepositoryScanningConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: BatchGetRepositoryScanningConfigurationCommandOutput) => void + ): void; + public batchGetRepositoryScanningConfiguration( + args: BatchGetRepositoryScanningConfigurationCommandInput, + optionsOrCb?: + | __HttpHandlerOptions + | ((err: any, data?: BatchGetRepositoryScanningConfigurationCommandOutput) => void), + cb?: (err: any, data?: BatchGetRepositoryScanningConfigurationCommandOutput) => void + ): Promise | void { + const command = new BatchGetRepositoryScanningConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Informs Amazon ECR that the image layer upload has completed for a specified registry, * repository name, and upload ID. You can optionally provide a sha256 digest @@ -328,6 +392,39 @@ export class ECR extends ECRClient { } } + /** + *

Creates a pull through cache rule. A pull through cache rule provides a way to cache + * images from an external public registry in your Amazon ECR private registry.
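Besides the promise form used elsewhere in these examples, the overloads added here also accept a Node-style callback, as in this sketch; all values are assumed placeholders:
```ts
import { ECR } from "@aws-sdk/client-ecr";

const ecr = new ECR({ region: "us-east-1" }); // assumed example region

// Callback form of the same operation; omit the callback to get a promise instead.
ecr.createPullThroughCacheRule(
  {
    ecrRepositoryPrefix: "ecr-public",     // assumed example prefix
    upstreamRegistryUrl: "public.ecr.aws", // assumed example upstream registry URL
  },
  (err, data) => {
    if (err) {
      console.error(err);
    } else {
      console.log(data?.createdAt);
    }
  }
);
```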

                                  + */ + public createPullThroughCacheRule( + args: CreatePullThroughCacheRuleCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createPullThroughCacheRule( + args: CreatePullThroughCacheRuleCommandInput, + cb: (err: any, data?: CreatePullThroughCacheRuleCommandOutput) => void + ): void; + public createPullThroughCacheRule( + args: CreatePullThroughCacheRuleCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreatePullThroughCacheRuleCommandOutput) => void + ): void; + public createPullThroughCacheRule( + args: CreatePullThroughCacheRuleCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreatePullThroughCacheRuleCommandOutput) => void), + cb?: (err: any, data?: CreatePullThroughCacheRuleCommandOutput) => void + ): Promise | void { + const command = new CreatePullThroughCacheRuleCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Creates a repository. For more information, see Amazon ECR repositories in the * Amazon Elastic Container Registry User Guide.
                                  @@ -393,6 +490,38 @@ export class ECR extends ECRClient { } } + /** + *

Deletes a pull through cache rule.
                                  + */ + public deletePullThroughCacheRule( + args: DeletePullThroughCacheRuleCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deletePullThroughCacheRule( + args: DeletePullThroughCacheRuleCommandInput, + cb: (err: any, data?: DeletePullThroughCacheRuleCommandOutput) => void + ): void; + public deletePullThroughCacheRule( + args: DeletePullThroughCacheRuleCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeletePullThroughCacheRuleCommandOutput) => void + ): void; + public deletePullThroughCacheRule( + args: DeletePullThroughCacheRuleCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeletePullThroughCacheRuleCommandOutput) => void), + cb?: (err: any, data?: DeletePullThroughCacheRuleCommandOutput) => void + ): Promise | void { + const command = new DeletePullThroughCacheRuleCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Deletes the registry permissions policy.
                                  */ @@ -593,6 +722,38 @@ export class ECR extends ECRClient { } } + /** + *

Returns the pull through cache rules for a registry.
                                  + */ + public describePullThroughCacheRules( + args: DescribePullThroughCacheRulesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describePullThroughCacheRules( + args: DescribePullThroughCacheRulesCommandInput, + cb: (err: any, data?: DescribePullThroughCacheRulesCommandOutput) => void + ): void; + public describePullThroughCacheRules( + args: DescribePullThroughCacheRulesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribePullThroughCacheRulesCommandOutput) => void + ): void; + public describePullThroughCacheRules( + args: DescribePullThroughCacheRulesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribePullThroughCacheRulesCommandOutput) => void), + cb?: (err: any, data?: DescribePullThroughCacheRulesCommandOutput) => void + ): Promise | void { + const command = new DescribePullThroughCacheRulesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Describes the settings for a registry. The replication configuration for a repository * can be created or updated with the PutReplicationConfiguration API @@ -834,6 +995,38 @@ export class ECR extends ECRClient { } } + /** + *

Retrieves the scanning configuration for a registry.
                                  + */ + public getRegistryScanningConfiguration( + args: GetRegistryScanningConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getRegistryScanningConfiguration( + args: GetRegistryScanningConfigurationCommandInput, + cb: (err: any, data?: GetRegistryScanningConfigurationCommandOutput) => void + ): void; + public getRegistryScanningConfiguration( + args: GetRegistryScanningConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetRegistryScanningConfigurationCommandOutput) => void + ): void; + public getRegistryScanningConfiguration( + args: GetRegistryScanningConfigurationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetRegistryScanningConfigurationCommandOutput) => void), + cb?: (err: any, data?: GetRegistryScanningConfigurationCommandOutput) => void + ): Promise | void { + const command = new GetRegistryScanningConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Retrieves the repository policy for the specified repository.
                                  */ @@ -1137,6 +1330,38 @@ export class ECR extends ECRClient { } } + /** + *

Creates or updates the scanning configuration for your private registry.
                                  + */ + public putRegistryScanningConfiguration( + args: PutRegistryScanningConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public putRegistryScanningConfiguration( + args: PutRegistryScanningConfigurationCommandInput, + cb: (err: any, data?: PutRegistryScanningConfigurationCommandOutput) => void + ): void; + public putRegistryScanningConfiguration( + args: PutRegistryScanningConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: PutRegistryScanningConfigurationCommandOutput) => void + ): void; + public putRegistryScanningConfiguration( + args: PutRegistryScanningConfigurationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: PutRegistryScanningConfigurationCommandOutput) => void), + cb?: (err: any, data?: PutRegistryScanningConfigurationCommandOutput) => void + ): Promise | void { + const command = new PutRegistryScanningConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Creates or updates the replication configuration for a registry. The existing * replication configuration for a repository can be retrieved with the DescribeRegistry API action. The first time the diff --git a/clients/client-ecr/src/ECRClient.ts b/clients/client-ecr/src/ECRClient.ts index 0f414965b515..c93b63d023a0 100644 --- a/clients/client-ecr/src/ECRClient.ts +++ b/clients/client-ecr/src/ECRClient.ts @@ -55,15 +55,27 @@ import { } from "./commands/BatchCheckLayerAvailabilityCommand"; import { BatchDeleteImageCommandInput, BatchDeleteImageCommandOutput } from "./commands/BatchDeleteImageCommand"; import { BatchGetImageCommandInput, BatchGetImageCommandOutput } from "./commands/BatchGetImageCommand"; +import { + BatchGetRepositoryScanningConfigurationCommandInput, + BatchGetRepositoryScanningConfigurationCommandOutput, +} from "./commands/BatchGetRepositoryScanningConfigurationCommand"; import { CompleteLayerUploadCommandInput, CompleteLayerUploadCommandOutput, } from "./commands/CompleteLayerUploadCommand"; +import { + CreatePullThroughCacheRuleCommandInput, + CreatePullThroughCacheRuleCommandOutput, +} from "./commands/CreatePullThroughCacheRuleCommand"; import { CreateRepositoryCommandInput, CreateRepositoryCommandOutput } from "./commands/CreateRepositoryCommand"; import { DeleteLifecyclePolicyCommandInput, DeleteLifecyclePolicyCommandOutput, } from "./commands/DeleteLifecyclePolicyCommand"; +import { + DeletePullThroughCacheRuleCommandInput, + DeletePullThroughCacheRuleCommandOutput, +} from "./commands/DeletePullThroughCacheRuleCommand"; import { DeleteRegistryPolicyCommandInput, DeleteRegistryPolicyCommandOutput, @@ -82,6 +94,10 @@ import { DescribeImageScanFindingsCommandOutput, } from "./commands/DescribeImageScanFindingsCommand"; import { DescribeImagesCommandInput, DescribeImagesCommandOutput } from "./commands/DescribeImagesCommand"; +import { + DescribePullThroughCacheRulesCommandInput, + DescribePullThroughCacheRulesCommandOutput, +} from "./commands/DescribePullThroughCacheRulesCommand"; import { DescribeRegistryCommandInput, DescribeRegistryCommandOutput } from "./commands/DescribeRegistryCommand"; import { DescribeRepositoriesCommandInput, @@ -101,6 +117,10 @@ import { GetLifecyclePolicyPreviewCommandOutput, } from "./commands/GetLifecyclePolicyPreviewCommand"; import { GetRegistryPolicyCommandInput, GetRegistryPolicyCommandOutput } from "./commands/GetRegistryPolicyCommand"; +import { + GetRegistryScanningConfigurationCommandInput, + GetRegistryScanningConfigurationCommandOutput, +} from "./commands/GetRegistryScanningConfigurationCommand"; import { GetRepositoryPolicyCommandInput, GetRepositoryPolicyCommandOutput, @@ -125,6 +145,10 @@ import { } from "./commands/PutImageTagMutabilityCommand"; import { PutLifecyclePolicyCommandInput, PutLifecyclePolicyCommandOutput } from "./commands/PutLifecyclePolicyCommand"; import { PutRegistryPolicyCommandInput, PutRegistryPolicyCommandOutput } from "./commands/PutRegistryPolicyCommand"; +import { + PutRegistryScanningConfigurationCommandInput, + PutRegistryScanningConfigurationCommandOutput, +} from "./commands/PutRegistryScanningConfigurationCommand"; import { PutReplicationConfigurationCommandInput, PutReplicationConfigurationCommandOutput, @@ -147,15 +171,19 @@ export type ServiceInputTypes = | BatchCheckLayerAvailabilityCommandInput | BatchDeleteImageCommandInput | BatchGetImageCommandInput + | BatchGetRepositoryScanningConfigurationCommandInput | CompleteLayerUploadCommandInput + | 
CreatePullThroughCacheRuleCommandInput | CreateRepositoryCommandInput | DeleteLifecyclePolicyCommandInput + | DeletePullThroughCacheRuleCommandInput | DeleteRegistryPolicyCommandInput | DeleteRepositoryCommandInput | DeleteRepositoryPolicyCommandInput | DescribeImageReplicationStatusCommandInput | DescribeImageScanFindingsCommandInput | DescribeImagesCommandInput + | DescribePullThroughCacheRulesCommandInput | DescribeRegistryCommandInput | DescribeRepositoriesCommandInput | GetAuthorizationTokenCommandInput @@ -163,6 +191,7 @@ export type ServiceInputTypes = | GetLifecyclePolicyCommandInput | GetLifecyclePolicyPreviewCommandInput | GetRegistryPolicyCommandInput + | GetRegistryScanningConfigurationCommandInput | GetRepositoryPolicyCommandInput | InitiateLayerUploadCommandInput | ListImagesCommandInput @@ -172,6 +201,7 @@ export type ServiceInputTypes = | PutImageTagMutabilityCommandInput | PutLifecyclePolicyCommandInput | PutRegistryPolicyCommandInput + | PutRegistryScanningConfigurationCommandInput | PutReplicationConfigurationCommandInput | SetRepositoryPolicyCommandInput | StartImageScanCommandInput @@ -184,15 +214,19 @@ export type ServiceOutputTypes = | BatchCheckLayerAvailabilityCommandOutput | BatchDeleteImageCommandOutput | BatchGetImageCommandOutput + | BatchGetRepositoryScanningConfigurationCommandOutput | CompleteLayerUploadCommandOutput + | CreatePullThroughCacheRuleCommandOutput | CreateRepositoryCommandOutput | DeleteLifecyclePolicyCommandOutput + | DeletePullThroughCacheRuleCommandOutput | DeleteRegistryPolicyCommandOutput | DeleteRepositoryCommandOutput | DeleteRepositoryPolicyCommandOutput | DescribeImageReplicationStatusCommandOutput | DescribeImageScanFindingsCommandOutput | DescribeImagesCommandOutput + | DescribePullThroughCacheRulesCommandOutput | DescribeRegistryCommandOutput | DescribeRepositoriesCommandOutput | GetAuthorizationTokenCommandOutput @@ -200,6 +234,7 @@ export type ServiceOutputTypes = | GetLifecyclePolicyCommandOutput | GetLifecyclePolicyPreviewCommandOutput | GetRegistryPolicyCommandOutput + | GetRegistryScanningConfigurationCommandOutput | GetRepositoryPolicyCommandOutput | InitiateLayerUploadCommandOutput | ListImagesCommandOutput @@ -209,6 +244,7 @@ export type ServiceOutputTypes = | PutImageTagMutabilityCommandOutput | PutLifecyclePolicyCommandOutput | PutRegistryPolicyCommandOutput + | PutRegistryScanningConfigurationCommandOutput | PutReplicationConfigurationCommandOutput | SetRepositoryPolicyCommandOutput | StartImageScanCommandOutput diff --git a/clients/client-ecr/src/commands/BatchGetRepositoryScanningConfigurationCommand.ts b/clients/client-ecr/src/commands/BatchGetRepositoryScanningConfigurationCommand.ts new file mode 100644 index 000000000000..c8666f604db8 --- /dev/null +++ b/clients/client-ecr/src/commands/BatchGetRepositoryScanningConfigurationCommand.ts @@ -0,0 +1,110 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ECRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ECRClient"; +import { + BatchGetRepositoryScanningConfigurationRequest, + BatchGetRepositoryScanningConfigurationResponse, 
+} from "../models/models_0"; +import { + deserializeAws_json1_1BatchGetRepositoryScanningConfigurationCommand, + serializeAws_json1_1BatchGetRepositoryScanningConfigurationCommand, +} from "../protocols/Aws_json1_1"; + +export interface BatchGetRepositoryScanningConfigurationCommandInput + extends BatchGetRepositoryScanningConfigurationRequest {} +export interface BatchGetRepositoryScanningConfigurationCommandOutput + extends BatchGetRepositoryScanningConfigurationResponse, + __MetadataBearer {} + +/** + *

Gets the scanning configuration for one or more repositories.
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ECRClient, BatchGetRepositoryScanningConfigurationCommand } from "@aws-sdk/client-ecr"; // ES Modules import + * // const { ECRClient, BatchGetRepositoryScanningConfigurationCommand } = require("@aws-sdk/client-ecr"); // CommonJS import + * const client = new ECRClient(config); + * const command = new BatchGetRepositoryScanningConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link BatchGetRepositoryScanningConfigurationCommandInput} for command's `input` shape. + * @see {@link BatchGetRepositoryScanningConfigurationCommandOutput} for command's `response` shape. + * @see {@link ECRClientResolvedConfig | config} for ECRClient's `config` shape. + * + */ +export class BatchGetRepositoryScanningConfigurationCommand extends $Command< + BatchGetRepositoryScanningConfigurationCommandInput, + BatchGetRepositoryScanningConfigurationCommandOutput, + ECRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: BatchGetRepositoryScanningConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ECRClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler< + BatchGetRepositoryScanningConfigurationCommandInput, + BatchGetRepositoryScanningConfigurationCommandOutput + > { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ECRClient"; + const commandName = "BatchGetRepositoryScanningConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: BatchGetRepositoryScanningConfigurationRequest.filterSensitiveLog, + outputFilterSensitiveLog: BatchGetRepositoryScanningConfigurationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: BatchGetRepositoryScanningConfigurationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1BatchGetRepositoryScanningConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1BatchGetRepositoryScanningConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ecr/src/commands/CreatePullThroughCacheRuleCommand.ts b/clients/client-ecr/src/commands/CreatePullThroughCacheRuleCommand.ts new file mode 100644 index 000000000000..e7e8042bb2b2 --- /dev/null +++ b/clients/client-ecr/src/commands/CreatePullThroughCacheRuleCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + 
HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ECRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ECRClient"; +import { CreatePullThroughCacheRuleRequest, CreatePullThroughCacheRuleResponse } from "../models/models_0"; +import { + deserializeAws_json1_1CreatePullThroughCacheRuleCommand, + serializeAws_json1_1CreatePullThroughCacheRuleCommand, +} from "../protocols/Aws_json1_1"; + +export interface CreatePullThroughCacheRuleCommandInput extends CreatePullThroughCacheRuleRequest {} +export interface CreatePullThroughCacheRuleCommandOutput extends CreatePullThroughCacheRuleResponse, __MetadataBearer {} + +/** + *

Creates a pull through cache rule. A pull through cache rule provides a way to cache + * images from an external public registry in your Amazon ECR private registry.
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ECRClient, CreatePullThroughCacheRuleCommand } from "@aws-sdk/client-ecr"; // ES Modules import + * // const { ECRClient, CreatePullThroughCacheRuleCommand } = require("@aws-sdk/client-ecr"); // CommonJS import + * const client = new ECRClient(config); + * const command = new CreatePullThroughCacheRuleCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreatePullThroughCacheRuleCommandInput} for command's `input` shape. + * @see {@link CreatePullThroughCacheRuleCommandOutput} for command's `response` shape. + * @see {@link ECRClientResolvedConfig | config} for ECRClient's `config` shape. + * + */ +export class CreatePullThroughCacheRuleCommand extends $Command< + CreatePullThroughCacheRuleCommandInput, + CreatePullThroughCacheRuleCommandOutput, + ECRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreatePullThroughCacheRuleCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ECRClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ECRClient"; + const commandName = "CreatePullThroughCacheRuleCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreatePullThroughCacheRuleRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreatePullThroughCacheRuleResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreatePullThroughCacheRuleCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1CreatePullThroughCacheRuleCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1CreatePullThroughCacheRuleCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ecr/src/commands/DeletePullThroughCacheRuleCommand.ts b/clients/client-ecr/src/commands/DeletePullThroughCacheRuleCommand.ts new file mode 100644 index 000000000000..2478f91d23d7 --- /dev/null +++ b/clients/client-ecr/src/commands/DeletePullThroughCacheRuleCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ECRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ECRClient"; +import { DeletePullThroughCacheRuleRequest, 
DeletePullThroughCacheRuleResponse } from "../models/models_0"; +import { + deserializeAws_json1_1DeletePullThroughCacheRuleCommand, + serializeAws_json1_1DeletePullThroughCacheRuleCommand, +} from "../protocols/Aws_json1_1"; + +export interface DeletePullThroughCacheRuleCommandInput extends DeletePullThroughCacheRuleRequest {} +export interface DeletePullThroughCacheRuleCommandOutput extends DeletePullThroughCacheRuleResponse, __MetadataBearer {} + +/** + *

Deletes a pull through cache rule.
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ECRClient, DeletePullThroughCacheRuleCommand } from "@aws-sdk/client-ecr"; // ES Modules import + * // const { ECRClient, DeletePullThroughCacheRuleCommand } = require("@aws-sdk/client-ecr"); // CommonJS import + * const client = new ECRClient(config); + * const command = new DeletePullThroughCacheRuleCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeletePullThroughCacheRuleCommandInput} for command's `input` shape. + * @see {@link DeletePullThroughCacheRuleCommandOutput} for command's `response` shape. + * @see {@link ECRClientResolvedConfig | config} for ECRClient's `config` shape. + * + */ +export class DeletePullThroughCacheRuleCommand extends $Command< + DeletePullThroughCacheRuleCommandInput, + DeletePullThroughCacheRuleCommandOutput, + ECRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeletePullThroughCacheRuleCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ECRClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ECRClient"; + const commandName = "DeletePullThroughCacheRuleCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeletePullThroughCacheRuleRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeletePullThroughCacheRuleResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeletePullThroughCacheRuleCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DeletePullThroughCacheRuleCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DeletePullThroughCacheRuleCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ecr/src/commands/DescribePullThroughCacheRulesCommand.ts b/clients/client-ecr/src/commands/DescribePullThroughCacheRulesCommand.ts new file mode 100644 index 000000000000..c945c5da9671 --- /dev/null +++ b/clients/client-ecr/src/commands/DescribePullThroughCacheRulesCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ECRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ECRClient"; +import { 
DescribePullThroughCacheRulesRequest, DescribePullThroughCacheRulesResponse } from "../models/models_0"; +import { + deserializeAws_json1_1DescribePullThroughCacheRulesCommand, + serializeAws_json1_1DescribePullThroughCacheRulesCommand, +} from "../protocols/Aws_json1_1"; + +export interface DescribePullThroughCacheRulesCommandInput extends DescribePullThroughCacheRulesRequest {} +export interface DescribePullThroughCacheRulesCommandOutput + extends DescribePullThroughCacheRulesResponse, + __MetadataBearer {} + +/** + *

Returns the pull through cache rules for a registry.
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ECRClient, DescribePullThroughCacheRulesCommand } from "@aws-sdk/client-ecr"; // ES Modules import + * // const { ECRClient, DescribePullThroughCacheRulesCommand } = require("@aws-sdk/client-ecr"); // CommonJS import + * const client = new ECRClient(config); + * const command = new DescribePullThroughCacheRulesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribePullThroughCacheRulesCommandInput} for command's `input` shape. + * @see {@link DescribePullThroughCacheRulesCommandOutput} for command's `response` shape. + * @see {@link ECRClientResolvedConfig | config} for ECRClient's `config` shape. + * + */ +export class DescribePullThroughCacheRulesCommand extends $Command< + DescribePullThroughCacheRulesCommandInput, + DescribePullThroughCacheRulesCommandOutput, + ECRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribePullThroughCacheRulesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ECRClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ECRClient"; + const commandName = "DescribePullThroughCacheRulesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribePullThroughCacheRulesRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribePullThroughCacheRulesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribePullThroughCacheRulesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DescribePullThroughCacheRulesCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DescribePullThroughCacheRulesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ecr/src/commands/GetRegistryScanningConfigurationCommand.ts b/clients/client-ecr/src/commands/GetRegistryScanningConfigurationCommand.ts new file mode 100644 index 000000000000..72365339c5ca --- /dev/null +++ b/clients/client-ecr/src/commands/GetRegistryScanningConfigurationCommand.ts @@ -0,0 +1,103 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ECRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from 
"../ECRClient"; +import { GetRegistryScanningConfigurationRequest, GetRegistryScanningConfigurationResponse } from "../models/models_0"; +import { + deserializeAws_json1_1GetRegistryScanningConfigurationCommand, + serializeAws_json1_1GetRegistryScanningConfigurationCommand, +} from "../protocols/Aws_json1_1"; + +export interface GetRegistryScanningConfigurationCommandInput extends GetRegistryScanningConfigurationRequest {} +export interface GetRegistryScanningConfigurationCommandOutput + extends GetRegistryScanningConfigurationResponse, + __MetadataBearer {} + +/** + *

Retrieves the scanning configuration for a registry.
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ECRClient, GetRegistryScanningConfigurationCommand } from "@aws-sdk/client-ecr"; // ES Modules import + * // const { ECRClient, GetRegistryScanningConfigurationCommand } = require("@aws-sdk/client-ecr"); // CommonJS import + * const client = new ECRClient(config); + * const command = new GetRegistryScanningConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetRegistryScanningConfigurationCommandInput} for command's `input` shape. + * @see {@link GetRegistryScanningConfigurationCommandOutput} for command's `response` shape. + * @see {@link ECRClientResolvedConfig | config} for ECRClient's `config` shape. + * + */ +export class GetRegistryScanningConfigurationCommand extends $Command< + GetRegistryScanningConfigurationCommandInput, + GetRegistryScanningConfigurationCommandOutput, + ECRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetRegistryScanningConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ECRClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ECRClient"; + const commandName = "GetRegistryScanningConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetRegistryScanningConfigurationRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetRegistryScanningConfigurationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: GetRegistryScanningConfigurationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1GetRegistryScanningConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1GetRegistryScanningConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ecr/src/commands/PutRegistryScanningConfigurationCommand.ts b/clients/client-ecr/src/commands/PutRegistryScanningConfigurationCommand.ts new file mode 100644 index 000000000000..3f39a8c6b9de --- /dev/null +++ b/clients/client-ecr/src/commands/PutRegistryScanningConfigurationCommand.ts @@ -0,0 +1,103 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { 
ECRClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../ECRClient"; +import { PutRegistryScanningConfigurationRequest, PutRegistryScanningConfigurationResponse } from "../models/models_0"; +import { + deserializeAws_json1_1PutRegistryScanningConfigurationCommand, + serializeAws_json1_1PutRegistryScanningConfigurationCommand, +} from "../protocols/Aws_json1_1"; + +export interface PutRegistryScanningConfigurationCommandInput extends PutRegistryScanningConfigurationRequest {} +export interface PutRegistryScanningConfigurationCommandOutput + extends PutRegistryScanningConfigurationResponse, + __MetadataBearer {} + +/** + *

Creates or updates the scanning configuration for your private registry.
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { ECRClient, PutRegistryScanningConfigurationCommand } from "@aws-sdk/client-ecr"; // ES Modules import + * // const { ECRClient, PutRegistryScanningConfigurationCommand } = require("@aws-sdk/client-ecr"); // CommonJS import + * const client = new ECRClient(config); + * const command = new PutRegistryScanningConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link PutRegistryScanningConfigurationCommandInput} for command's `input` shape. + * @see {@link PutRegistryScanningConfigurationCommandOutput} for command's `response` shape. + * @see {@link ECRClientResolvedConfig | config} for ECRClient's `config` shape. + * + */ +export class PutRegistryScanningConfigurationCommand extends $Command< + PutRegistryScanningConfigurationCommandInput, + PutRegistryScanningConfigurationCommandOutput, + ECRClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: PutRegistryScanningConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: ECRClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "ECRClient"; + const commandName = "PutRegistryScanningConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: PutRegistryScanningConfigurationRequest.filterSensitiveLog, + outputFilterSensitiveLog: PutRegistryScanningConfigurationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: PutRegistryScanningConfigurationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1PutRegistryScanningConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1PutRegistryScanningConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-ecr/src/commands/index.ts b/clients/client-ecr/src/commands/index.ts index cd1bf6bd478e..d291fec7e7d3 100644 --- a/clients/client-ecr/src/commands/index.ts +++ b/clients/client-ecr/src/commands/index.ts @@ -1,15 +1,19 @@ export * from "./BatchCheckLayerAvailabilityCommand"; export * from "./BatchDeleteImageCommand"; export * from "./BatchGetImageCommand"; +export * from "./BatchGetRepositoryScanningConfigurationCommand"; export * from "./CompleteLayerUploadCommand"; +export * from "./CreatePullThroughCacheRuleCommand"; export * from "./CreateRepositoryCommand"; export * from "./DeleteLifecyclePolicyCommand"; +export * from "./DeletePullThroughCacheRuleCommand"; export * from "./DeleteRegistryPolicyCommand"; export * from "./DeleteRepositoryCommand"; export * from 
"./DeleteRepositoryPolicyCommand"; export * from "./DescribeImageReplicationStatusCommand"; export * from "./DescribeImageScanFindingsCommand"; export * from "./DescribeImagesCommand"; +export * from "./DescribePullThroughCacheRulesCommand"; export * from "./DescribeRegistryCommand"; export * from "./DescribeRepositoriesCommand"; export * from "./GetAuthorizationTokenCommand"; @@ -17,6 +21,7 @@ export * from "./GetDownloadUrlForLayerCommand"; export * from "./GetLifecyclePolicyCommand"; export * from "./GetLifecyclePolicyPreviewCommand"; export * from "./GetRegistryPolicyCommand"; +export * from "./GetRegistryScanningConfigurationCommand"; export * from "./GetRepositoryPolicyCommand"; export * from "./InitiateLayerUploadCommand"; export * from "./ListImagesCommand"; @@ -26,6 +31,7 @@ export * from "./PutImageScanningConfigurationCommand"; export * from "./PutImageTagMutabilityCommand"; export * from "./PutLifecyclePolicyCommand"; export * from "./PutRegistryPolicyCommand"; +export * from "./PutRegistryScanningConfigurationCommand"; export * from "./PutReplicationConfigurationCommand"; export * from "./SetRepositoryPolicyCommand"; export * from "./StartImageScanCommand"; diff --git a/clients/client-ecr/src/models/models_0.ts b/clients/client-ecr/src/models/models_0.ts index a6a71ae67c90..b7055b646926 100644 --- a/clients/client-ecr/src/models/models_0.ts +++ b/clients/client-ecr/src/models/models_0.ts @@ -404,6 +404,168 @@ export namespace BatchGetImageResponse { }); } +export interface BatchGetRepositoryScanningConfigurationRequest { + /** + *

One or more repository names to get the scanning configuration for.
                                  + */ + repositoryNames: string[] | undefined; +} + +export namespace BatchGetRepositoryScanningConfigurationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchGetRepositoryScanningConfigurationRequest): any => ({ + ...obj, + }); +} + +export enum ScanningConfigurationFailureCode { + REPOSITORY_NOT_FOUND = "REPOSITORY_NOT_FOUND", +} + +/** + *

The details about any failures associated with the scanning configuration of a + * repository.
                                  + */ +export interface RepositoryScanningConfigurationFailure { + /** + *

The name of the repository.
                                  + */ + repositoryName?: string; + + /** + *

The failure code.
                                  + */ + failureCode?: ScanningConfigurationFailureCode | string; + + /** + *

The reason for the failure.
                                  + */ + failureReason?: string; +} + +export namespace RepositoryScanningConfigurationFailure { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RepositoryScanningConfigurationFailure): any => ({ + ...obj, + }); +} + +export enum ScanningRepositoryFilterType { + WILDCARD = "WILDCARD", +} + +/** + *

The details of a scanning repository filter.
                                  + */ +export interface ScanningRepositoryFilter { + /** + *

The filter to use when scanning.
                                  + */ + filter: string | undefined; + + /** + *

The type associated with the filter.
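As a sketch of how the two members combine, a wildcard filter keyed on an assumed repository name prefix could look like this:
```ts
import { ScanningRepositoryFilter, ScanningRepositoryFilterType } from "@aws-sdk/client-ecr";

// Matches every repository whose name begins with "prod-" (assumed example prefix).
const filter: ScanningRepositoryFilter = {
  filter: "prod-*",
  filterType: ScanningRepositoryFilterType.WILDCARD,
};
```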

                                  + */ + filterType: ScanningRepositoryFilterType | string | undefined; +} + +export namespace ScanningRepositoryFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScanningRepositoryFilter): any => ({ + ...obj, + }); +} + +export enum ScanFrequency { + CONTINUOUS_SCAN = "CONTINUOUS_SCAN", + MANUAL = "MANUAL", + SCAN_ON_PUSH = "SCAN_ON_PUSH", +} + +/** + *

The details of the scanning configuration for a repository.
                                  + */ +export interface RepositoryScanningConfiguration { + /** + *

The ARN of the repository.
                                  + */ + repositoryArn?: string; + + /** + *

The name of the repository.
                                  + */ + repositoryName?: string; + + /** + *

Whether or not scan on push is configured for the repository.
                                  + */ + scanOnPush?: boolean; + + /** + *

The scan frequency for the repository.
                                  + */ + scanFrequency?: ScanFrequency | string; + + /** + *

The scan filters applied to the repository.
                                  + */ + appliedScanFilters?: ScanningRepositoryFilter[]; +} + +export namespace RepositoryScanningConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RepositoryScanningConfiguration): any => ({ + ...obj, + }); +} + +export interface BatchGetRepositoryScanningConfigurationResponse { + /** + *

The scanning configuration for the requested repositories.
                                  + */ + scanningConfigurations?: RepositoryScanningConfiguration[]; + + /** + *

Any failures associated with the call.
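A sketch of surfacing these failures after a batch call; the region and repository names are assumed placeholders:
```ts
import { ECR } from "@aws-sdk/client-ecr";

const ecr = new ECR({ region: "us-east-1" }); // assumed example region

const { failures = [] } = await ecr.batchGetRepositoryScanningConfiguration({
  repositoryNames: ["my-repo", "missing-repo"], // assumed example names
});

for (const failure of failures) {
  // failureCode is REPOSITORY_NOT_FOUND when a named repository does not exist.
  console.warn(`${failure.repositoryName}: ${failure.failureCode} (${failure.failureReason})`);
}
```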

                                  + */ + failures?: RepositoryScanningConfigurationFailure[]; +} + +export namespace BatchGetRepositoryScanningConfigurationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchGetRepositoryScanningConfigurationResponse): any => ({ + ...obj, + }); +} + +/** + *

There was an exception validating this request.
                                  + */ +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; + $fault: "client"; + message?: string; +} + +export namespace ValidationException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); +} + export interface CompleteLayerUploadRequest { /** *

                                  The Amazon Web Services account ID associated with the registry to which to upload layers. @@ -597,6 +759,126 @@ export namespace UploadNotFoundException { }); } +export interface CreatePullThroughCacheRuleRequest { + /** + *

The repository name prefix to use when caching images from the source registry.
                                  + */ + ecrRepositoryPrefix: string | undefined; + + /** + *

The registry URL of the upstream public registry to use as the source for the pull + * through cache rule.
                                  + */ + upstreamRegistryUrl: string | undefined; + + /** + *

The Amazon Web Services account ID associated with the registry to create the pull through cache + * rule for. If you do not specify a registry, the default registry is assumed.
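Putting the three members together, a sketch using the bare-bones client; the prefix and upstream URL are assumed example values:
```ts
import { ECRClient, CreatePullThroughCacheRuleCommand } from "@aws-sdk/client-ecr";

const client = new ECRClient({ region: "us-east-1" }); // assumed example region

const response = await client.send(
  new CreatePullThroughCacheRuleCommand({
    ecrRepositoryPrefix: "ecr-public",     // assumed example prefix
    upstreamRegistryUrl: "public.ecr.aws", // assumed example upstream registry URL
    // registryId omitted, so the default private registry is assumed
  })
);
console.log(response.createdAt);
```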

                                  + */ + registryId?: string; +} + +export namespace CreatePullThroughCacheRuleRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreatePullThroughCacheRuleRequest): any => ({ + ...obj, + }); +} + +export interface CreatePullThroughCacheRuleResponse { + /** + *

The Amazon ECR repository prefix associated with the pull through cache rule.
                                  + */ + ecrRepositoryPrefix?: string; + + /** + *

The upstream registry URL associated with the pull through cache rule.
                                  + */ + upstreamRegistryUrl?: string; + + /** + *

The date and time, in JavaScript date format, when the pull through cache rule was + * created.
                                  + */ + createdAt?: Date; + + /** + *

The registry ID associated with the request.
                                  + */ + registryId?: string; +} + +export namespace CreatePullThroughCacheRuleResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreatePullThroughCacheRuleResponse): any => ({ + ...obj, + }); +} + +/** + *

The operation did not succeed because it would have exceeded a service limit for your + * account. For more information, see Amazon ECR service quotas in + * the Amazon Elastic Container Registry User Guide.
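A sketch of reacting to this error when creating a pull through cache rule, assuming the modeled name member is used to tell it apart; all other values are assumed placeholders:
```ts
import { ECRClient, CreatePullThroughCacheRuleCommand } from "@aws-sdk/client-ecr";

const client = new ECRClient({ region: "us-east-1" }); // assumed example region

try {
  await client.send(
    new CreatePullThroughCacheRuleCommand({
      ecrRepositoryPrefix: "ecr-public",     // assumed example prefix
      upstreamRegistryUrl: "public.ecr.aws", // assumed example upstream registry URL
    })
  );
} catch (error: any) {
  // Modeled service exceptions carry their type in the `name` member.
  if (error?.name === "LimitExceededException") {
    console.warn("Pull through cache rule quota reached:", error.message);
  } else {
    throw error;
  }
}
```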

                                  + */ +export interface LimitExceededException extends __SmithyException, $MetadataBearer { + name: "LimitExceededException"; + $fault: "client"; + /** + *

The error message associated with the exception.
                                  + */ + message?: string; +} + +export namespace LimitExceededException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LimitExceededException): any => ({ + ...obj, + }); +} + +/** + *

A pull through cache rule with these settings already exists for the private + * registry.
                                  + */ +export interface PullThroughCacheRuleAlreadyExistsException extends __SmithyException, $MetadataBearer { + name: "PullThroughCacheRuleAlreadyExistsException"; + $fault: "client"; + message?: string; +} + +export namespace PullThroughCacheRuleAlreadyExistsException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PullThroughCacheRuleAlreadyExistsException): any => ({ + ...obj, + }); +} + +/** + *

The specified upstream registry isn't supported.
                                  + */ +export interface UnsupportedUpstreamRegistryException extends __SmithyException, $MetadataBearer { + name: "UnsupportedUpstreamRegistryException"; + $fault: "client"; + message?: string; +} + +export namespace UnsupportedUpstreamRegistryException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UnsupportedUpstreamRegistryException): any => ({ + ...obj, + }); +} + export enum EncryptionType { AES256 = "AES256", KMS = "KMS", @@ -623,12 +905,12 @@ export interface EncryptionConfiguration { * for Amazon ECR, or specify your own KMS key, which you already created. For more * information, see Protecting data using server-side * encryption with an KMS key stored in Key Management Service (SSE-KMS) in the - * Amazon Simple Storage Service Console Developer Guide..

+ * Amazon Simple Storage Service Console Developer Guide.
*
If you use the AES256 encryption type, Amazon ECR uses server-side encryption * with Amazon S3-managed encryption keys which encrypts the images in the repository using an * AES-256 encryption algorithm. For more information, see Protecting data using * server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the - * Amazon Simple Storage Service Console Developer Guide..
+ * Amazon Simple Storage Service Console Developer Guide.
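A sketch of choosing the AES256 option when creating a repository; the repository name is an assumed placeholder, and repositoryName and encryptionConfiguration are assumed here to be the CreateRepositoryRequest members that carry these values:
```ts
import { ECRClient, CreateRepositoryCommand, EncryptionType } from "@aws-sdk/client-ecr";

const client = new ECRClient({ region: "us-east-1" }); // assumed example region

// AES256 uses Amazon S3-managed keys, so no KMS key needs to be supplied.
await client.send(
  new CreateRepositoryCommand({
    repositoryName: "my-repo", // assumed example repository name
    encryptionConfiguration: { encryptionType: EncryptionType.AES256 },
  })
);
```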

                                  */ encryptionType: EncryptionType | string | undefined; @@ -708,7 +990,7 @@ export namespace Tag { export interface CreateRepositoryRequest { /** - *

The AWS account ID associated with the registry to create the repository. + *
The Amazon Web Services account ID associated with the registry to create the repository. * If you do not specify a registry, the default registry is assumed.
                                  */ registryId?: string; @@ -851,29 +1133,6 @@ export namespace InvalidTagParameterException { }); } -/** - *

The operation did not succeed because it would have exceeded a service limit for your - * account. For more information, see Amazon ECR service quotas in - * the Amazon Elastic Container Registry User Guide.
                                  - */ -export interface LimitExceededException extends __SmithyException, $MetadataBearer { - name: "LimitExceededException"; - $fault: "client"; - /** - *

The error message associated with the exception.
                                  - */ - message?: string; -} - -export namespace LimitExceededException { - /** - * @internal - */ - export const filterSensitiveLog = (obj: LimitExceededException): any => ({ - ...obj, - }); -} - /** *

The specified repository already exists in the specified registry.
                                  */ @@ -986,70 +1245,125 @@ export namespace LifecyclePolicyNotFoundException { }); } -export interface DeleteRegistryPolicyRequest {} +export interface DeletePullThroughCacheRuleRequest { + /** + *

The Amazon ECR repository prefix associated with the pull through cache rule to + * delete.
                                  + */ + ecrRepositoryPrefix: string | undefined; -export namespace DeleteRegistryPolicyRequest { + /** + *

The Amazon Web Services account ID associated with the registry that contains the pull through cache + * rule. If you do not specify a registry, the default registry is assumed.
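A sketch of deleting a rule by its prefix in the default registry and reading the echoed fields; the prefix is an assumed placeholder:
```ts
import { ECR } from "@aws-sdk/client-ecr";

const ecr = new ECR({ region: "us-east-1" }); // assumed example region

const { ecrRepositoryPrefix, createdAt } = await ecr.deletePullThroughCacheRule({
  ecrRepositoryPrefix: "ecr-public", // assumed example prefix
});
console.log(`Deleted rule for ${ecrRepositoryPrefix}, created ${createdAt}`);
```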

                                  + */ + registryId?: string; +} + +export namespace DeletePullThroughCacheRuleRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DeleteRegistryPolicyRequest): any => ({ + export const filterSensitiveLog = (obj: DeletePullThroughCacheRuleRequest): any => ({ ...obj, }); } -export interface DeleteRegistryPolicyResponse { +export interface DeletePullThroughCacheRuleResponse { /** - *

The registry ID associated with the request.
+ *
The Amazon ECR repository prefix associated with the request.
                                  */ - registryId?: string; + ecrRepositoryPrefix?: string; /** - *

The contents of the registry permissions policy that was deleted.
+ *
The upstream registry URL associated with the pull through cache rule.
                                  */ - policyText?: string; -} + upstreamRegistryUrl?: string; -export namespace DeleteRegistryPolicyResponse { /** - * @internal + *

The timestamp associated with the pull through cache rule.
                                  */ - export const filterSensitiveLog = (obj: DeleteRegistryPolicyResponse): any => ({ - ...obj, - }); -} + createdAt?: Date; + + /** + *

                                  The registry ID associated with the request.

                                  + */ + registryId?: string; +} + +export namespace DeletePullThroughCacheRuleResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePullThroughCacheRuleResponse): any => ({ + ...obj, + }); +} /** - *
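A sketch of the corresponding delete call, driven by the `DeletePullThroughCacheRuleRequest`/`Response` shapes defined above; the prefix value is illustrative, and `registryId` is left unset so the caller's default registry is assumed:

```ts
import { ECRClient, DeletePullThroughCacheRuleCommand } from "@aws-sdk/client-ecr";

const client = new ECRClient({ region: "us-west-2" });

async function deleteRule(prefix: string) {
  // ecrRepositoryPrefix is the only required field.
  const response = await client.send(
    new DeletePullThroughCacheRuleCommand({ ecrRepositoryPrefix: prefix })
  );
  // The response echoes back the rule that was removed.
  console.log(`Deleted ${response.ecrRepositoryPrefix} -> ${response.upstreamRegistryUrl}`);
}

deleteRule("ecr-public").catch(console.error);
```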

 /**
-   * The registry doesn't have an associated registry policy.
+   * The pull through cache rule was not found. Specify a valid pull through cache rule and
+   * try again.
    */
-export interface RegistryPolicyNotFoundException extends __SmithyException, $MetadataBearer {
-  name: "RegistryPolicyNotFoundException";
+export interface PullThroughCacheRuleNotFoundException extends __SmithyException, $MetadataBearer {
+  name: "PullThroughCacheRuleNotFoundException";
   $fault: "client";
   message?: string;
 }

-export namespace RegistryPolicyNotFoundException {
+export namespace PullThroughCacheRuleNotFoundException {
   /**
    * @internal
    */
-  export const filterSensitiveLog = (obj: RegistryPolicyNotFoundException): any => ({
+  export const filterSensitiveLog = (obj: PullThroughCacheRuleNotFoundException): any => ({
     ...obj,
   });
 }
+
+export interface DeleteRegistryPolicyRequest {}
+
+export namespace DeleteRegistryPolicyRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DeleteRegistryPolicyRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface DeleteRegistryPolicyResponse {
+  /**
+   * The registry ID associated with the request.
+   */
+  registryId?: string;
+
+  /**
+   * The contents of the registry permissions policy that was deleted.
+   */
+  policyText?: string;
+}
+
+export namespace DeleteRegistryPolicyResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DeleteRegistryPolicyResponse): any => ({
+    ...obj,
+  });
+}

 /**
-   * There was an exception validating this request.
+   * The registry doesn't have an associated registry policy.
    */
-export interface ValidationException extends __SmithyException, $MetadataBearer {
-  name: "ValidationException";
+export interface RegistryPolicyNotFoundException extends __SmithyException, $MetadataBearer {
+  name: "RegistryPolicyNotFoundException";
   $fault: "client";
   message?: string;
 }

-export namespace ValidationException {
+export namespace RegistryPolicyNotFoundException {
   /**
    * @internal
    */
-  export const filterSensitiveLog = (obj: ValidationException): any => ({
+  export const filterSensitiveLog = (obj: RegistryPolicyNotFoundException): any => ({
     ...obj,
   });
 }
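The deserializers later in this patch surface these error shapes by setting the exception name on the thrown `Error`, so callers can branch on `error.name`. A hedged sketch of handling the new not-found case:

```ts
import { ECRClient, DeletePullThroughCacheRuleCommand } from "@aws-sdk/client-ecr";

const client = new ECRClient({});

async function deleteIfPresent(prefix: string) {
  try {
    await client.send(new DeletePullThroughCacheRuleCommand({ ecrRepositoryPrefix: prefix }));
  } catch (error) {
    // Error objects carry the Smithy exception name set by the deserializer.
    if ((error as { name?: string }).name === "PullThroughCacheRuleNotFoundException") {
      console.log(`No pull through cache rule for prefix "${prefix}" in this registry.`);
      return;
    }
    throw error;
  }
}

deleteIfPresent("ecr-public").catch(console.error);
```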

@@ -1232,7 +1546,7 @@ export interface ImageReplicationStatus {
   region?: string;

   /**
-   * The AWS account ID associated with the registry to which the image belongs.
+   * The Amazon Web Services account ID associated with the registry to which the image belongs.
    */
   registryId?: string;
@@ -1423,9 +1737,14 @@ export namespace ImageScanFindingsSummary {
 }

 export enum ScanStatus {
+  ACTIVE = "ACTIVE",
   COMPLETE = "COMPLETE",
   FAILED = "FAILED",
+  FINDINGS_UNAVAILABLE = "FINDINGS_UNAVAILABLE",
   IN_PROGRESS = "IN_PROGRESS",
+  PENDING = "PENDING",
+  SCAN_ELIGIBILITY_EXPIRED = "SCAN_ELIGIBILITY_EXPIRED",
+  UNSUPPORTED_IMAGE = "UNSUPPORTED_IMAGE",
 }

 /**
@@ -1458,145 +1777,622 @@ export namespace ImageScanStatus {
  */
 export interface ImageDetail {
   /**

                                  The Amazon Web Services account ID associated with the registry to which this image belongs.

                                  + *

                                  The Amazon Web Services account ID associated with the registry to which this image belongs.

                                  + */ + registryId?: string; + + /** + *

                                  The name of the repository to which this image belongs.

                                  + */ + repositoryName?: string; + + /** + *

                                  The sha256 digest of the image manifest.

                                  + */ + imageDigest?: string; + + /** + *

                                  The list of tags associated with this image.

                                  + */ + imageTags?: string[]; + + /** + *

                                  The size, in bytes, of the image in the repository.

                                  + *

                                  If the image is a manifest list, this will be the max size of all manifests in the + * list.

                                  + * + *

                                  Beginning with Docker version 1.9, the Docker client compresses image layers + * before pushing them to a V2 Docker registry. The output of the docker + * images command shows the uncompressed image size, so it may return a + * larger image size than the image sizes returned by DescribeImages.

                                  + *
                                  + */ + imageSizeInBytes?: number; + + /** + *

                                  The date and time, expressed in standard JavaScript date format, at which the current + * image was pushed to the repository.

                                  + */ + imagePushedAt?: Date; + + /** + *

                                  The current state of the scan.

                                  + */ + imageScanStatus?: ImageScanStatus; + + /** + *

                                  A summary of the last completed image scan.

                                  + */ + imageScanFindingsSummary?: ImageScanFindingsSummary; + + /** + *

                                  The media type of the image manifest.

                                  + */ + imageManifestMediaType?: string; + + /** + *

                                  The artifact media type of the image.

                                  + */ + artifactMediaType?: string; +} + +export namespace ImageDetail { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImageDetail): any => ({ + ...obj, + }); +} + +export interface DescribeImagesResponse { + /** + *

                                  A list of ImageDetail objects that contain data about the + * image.

                                  + */ + imageDetails?: ImageDetail[]; + + /** + *

                                  The nextToken value to include in a future DescribeImages + * request. When the results of a DescribeImages request exceed + * maxResults, this value can be used to retrieve the next page of + * results. This value is null when there are no more results to + * return.

                                  + */ + nextToken?: string; +} + +export namespace DescribeImagesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeImagesResponse): any => ({ + ...obj, + }); +} + +export interface DescribeImageScanFindingsRequest { + /** + *

                                  The Amazon Web Services account ID associated with the registry that contains the repository in + * which to describe the image scan findings for. If you do not specify a registry, the default registry is assumed.

                                  + */ + registryId?: string; + + /** + *

                                  The repository for the image for which to describe the scan findings.

                                  + */ + repositoryName: string | undefined; + + /** + *

                                  An object with identifying information for an image in an Amazon ECR repository.

                                  + */ + imageId: ImageIdentifier | undefined; + + /** + *

                                  The nextToken value returned from a previous paginated + * DescribeImageScanFindings request where maxResults was + * used and the results exceeded the value of that parameter. Pagination continues from the + * end of the previous results that returned the nextToken value. This value + * is null when there are no more results to return.

                                  + */ + nextToken?: string; + + /** + *

                                  The maximum number of image scan results returned by + * DescribeImageScanFindings in paginated output. When this parameter is + * used, DescribeImageScanFindings only returns maxResults + * results in a single page along with a nextToken response element. The + * remaining results of the initial request can be seen by sending another + * DescribeImageScanFindings request with the returned + * nextToken value. This value can be between 1 and 1000. If this + * parameter is not used, then DescribeImageScanFindings returns up to 100 + * results and a nextToken value, if applicable.

                                  + */ + maxResults?: number; +} + +export namespace DescribeImageScanFindingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeImageScanFindingsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  The CVSS score for a finding.

                                  + */ +export interface CvssScore { + /** + *

                                  The base CVSS score used for the finding.

                                  + */ + baseScore?: number; + + /** + *

                                  The vector string of the CVSS score.

                                  + */ + scoringVector?: string; + + /** + *

                                  The source of the CVSS score.

                                  + */ + source?: string; + + /** + *

                                  The version of CVSS used for the score.

                                  + */ + version?: string; +} + +export namespace CvssScore { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CvssScore): any => ({ + ...obj, + }); +} + +/** + *

                                  Information on the vulnerable package identified by a finding.

                                  + */ +export interface VulnerablePackage { + /** + *

                                  The architecture of the vulnerable package.

                                  + */ + arch?: string; + + /** + *

                                  The epoch of the vulnerable package.

                                  + */ + epoch?: number; + + /** + *

                                  The file path of the vulnerable package.

                                  + */ + filePath?: string; + + /** + *

                                  The name of the vulnerable package.

                                  + */ + name?: string; + + /** + *

                                  The package manager of the vulnerable package.

                                  + */ + packageManager?: string; + + /** + *

                                  The release of the vulnerable package.

                                  + */ + release?: string; + + /** + *

                                  The source layer hash of the vulnerable package.

                                  + */ + sourceLayerHash?: string; + + /** + *

                                  The version of the vulnerable package.

                                  + */ + version?: string; +} + +export namespace VulnerablePackage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: VulnerablePackage): any => ({ + ...obj, + }); +} + +/** + *

                                  Information about a package vulnerability finding.

                                  + */ +export interface PackageVulnerabilityDetails { + /** + *

                                  An object that contains details about the CVSS score of a finding.

                                  + */ + cvss?: CvssScore[]; + + /** + *

                                  One or more URLs that contain details about this vulnerability type.

                                  + */ + referenceUrls?: string[]; + + /** + *

                                  One or more vulnerabilities related to the one identified in this finding.

                                  + */ + relatedVulnerabilities?: string[]; + + /** + *

                                  The source of the vulnerability information.

                                  + */ + source?: string; + + /** + *

                                  A URL to the source of the vulnerability information.

                                  + */ + sourceUrl?: string; + + /** + *

                                  The date and time that this vulnerability was first added to the vendor's + * database.

                                  + */ + vendorCreatedAt?: Date; + + /** + *

                                  The severity the vendor has given to this vulnerability type.

                                  + */ + vendorSeverity?: string; + + /** + *

                                  The date and time the vendor last updated this vulnerability in their database.

                                  + */ + vendorUpdatedAt?: Date; + + /** + *

                                  The ID given to this vulnerability.

                                  + */ + vulnerabilityId?: string; + + /** + *

                                  The packages impacted by this vulnerability.

                                  + */ + vulnerablePackages?: VulnerablePackage[]; +} + +export namespace PackageVulnerabilityDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PackageVulnerabilityDetails): any => ({ + ...obj, + }); +} + +/** + *

                                  Details about the recommended course of action to remediate the finding.

                                  + */ +export interface Recommendation { + /** + *

                                  The URL address to the CVE remediation recommendations.

                                  + */ + url?: string; + + /** + *

                                  The recommended course of action to remediate the finding.

                                  + */ + text?: string; +} + +export namespace Recommendation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Recommendation): any => ({ + ...obj, + }); +} + +/** + *

                                  Information on how to remediate a finding.

                                  + */ +export interface Remediation { + /** + *

                                  An object that contains information about the recommended course of action to + * remediate the finding.

                                  + */ + recommendation?: Recommendation; +} + +export namespace Remediation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Remediation): any => ({ + ...obj, + }); +} + +/** + *

                                  The image details of the Amazon ECR container image.

                                  + */ +export interface AwsEcrContainerImageDetails { + /** + *

                                  The architecture of the Amazon ECR container image.

                                  + */ + architecture?: string; + + /** + *

                                  The image author of the Amazon ECR container image.

                                  + */ + author?: string; + + /** + *

                                  The image hash of the Amazon ECR container image.

                                  + */ + imageHash?: string; + + /** + *

                                  The image tags attached to the Amazon ECR container image.

                                  + */ + imageTags?: string[]; + + /** + *

                                  The platform of the Amazon ECR container image.

                                  + */ + platform?: string; + + /** + *

                                  The date and time the Amazon ECR container image was pushed.

                                  + */ + pushedAt?: Date; + + /** + *

                                  The registry the Amazon ECR container image belongs to.

                                  + */ + registry?: string; + + /** + *

                                  The name of the repository the Amazon ECR container image resides in.

                                  + */ + repositoryName?: string; +} + +export namespace AwsEcrContainerImageDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AwsEcrContainerImageDetails): any => ({ + ...obj, + }); +} + +/** + *

                                  Contains details about the resource involved in the finding.

                                  + */ +export interface ResourceDetails { + /** + *

                                  An object that contains details about the Amazon ECR container image involved in the + * finding.

                                  + */ + awsEcrContainerImage?: AwsEcrContainerImageDetails; +} + +export namespace ResourceDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceDetails): any => ({ + ...obj, + }); +} + +/** + *

                                  Details about the resource involved in a finding.

                                  + */ +export interface Resource { + /** + *

                                  An object that contains details about the resource involved in a finding.

                                  + */ + details?: ResourceDetails; + + /** + *

                                  The ID of the resource.

                                  + */ + id?: string; + + /** + *

                                  The tags attached to the resource.

                                  + */ + tags?: { [key: string]: string }; + + /** + *

                                  The type of resource.

                                  + */ + type?: string; +} + +export namespace Resource { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Resource): any => ({ + ...obj, + }); +} + +/** + *

                                  Details on adjustments Amazon Inspector made to the CVSS score for a finding.

                                  + */ +export interface CvssScoreAdjustment { + /** + *

                                  The metric used to adjust the CVSS score.

                                  + */ + metric?: string; + + /** + *

                                  The reason the CVSS score has been adjustment.

                                  + */ + reason?: string; +} + +export namespace CvssScoreAdjustment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CvssScoreAdjustment): any => ({ + ...obj, + }); +} + +/** + *

                                  Information about the CVSS score.

                                  + */ +export interface CvssScoreDetails { + /** + *

                                  An object that contains details about adjustment Amazon Inspector made to the CVSS score.

                                  + */ + adjustments?: CvssScoreAdjustment[]; + + /** + *

                                  The CVSS score.

                                  + */ + score?: number; + + /** + *

                                  The source for the CVSS score.

                                  + */ + scoreSource?: string; + + /** + *

                                  The vector for the CVSS score.

                                  + */ + scoringVector?: string; + + /** + *

                                  The CVSS version used in scoring.

                                  */ - registryId?: string; + version?: string; +} +export namespace CvssScoreDetails { /** - *

                                  The name of the repository to which this image belongs.

                                  + * @internal */ - repositoryName?: string; + export const filterSensitiveLog = (obj: CvssScoreDetails): any => ({ + ...obj, + }); +} +/** + *

                                  Information about the Amazon Inspector score given to a finding.

                                  + */ +export interface ScoreDetails { /** - *

                                  The sha256 digest of the image manifest.

                                  + *

                                  An object that contains details about the CVSS score given to a finding.

                                  */ - imageDigest?: string; + cvss?: CvssScoreDetails; +} +export namespace ScoreDetails { /** - *

                                  The list of tags associated with this image.

                                  + * @internal */ - imageTags?: string[]; + export const filterSensitiveLog = (obj: ScoreDetails): any => ({ + ...obj, + }); +} +/** + *

                                  The details of an enhanced image scan. This is returned when enhanced scanning is + * enabled for your private registry.

                                  + */ +export interface EnhancedImageScanFinding { /** - *

                                  The size, in bytes, of the image in the repository.

                                  - *

                                  If the image is a manifest list, this will be the max size of all manifests in the - * list.

                                  - * - *

                                  Beginning with Docker version 1.9, the Docker client compresses image layers - * before pushing them to a V2 Docker registry. The output of the docker - * images command shows the uncompressed image size, so it may return a - * larger image size than the image sizes returned by DescribeImages.

                                  - *
                                  + *

                                  The Amazon Web Services account ID associated with the image.

                                  */ - imageSizeInBytes?: number; + awsAccountId?: string; /** - *

                                  The date and time, expressed in standard JavaScript date format, at which the current - * image was pushed to the repository.

                                  + *

                                  The description of the finding.

                                  */ - imagePushedAt?: Date; + description?: string; /** - *

                                  The current state of the scan.

                                  + *

                                  The Amazon Resource Number (ARN) of the finding.

                                  */ - imageScanStatus?: ImageScanStatus; + findingArn?: string; /** - *

                                  A summary of the last completed image scan.

                                  + *

                                  The date and time that the finding was first observed.

                                  */ - imageScanFindingsSummary?: ImageScanFindingsSummary; + firstObservedAt?: Date; /** - *

                                  The media type of the image manifest.

                                  + *

                                  The date and time that the finding was last observed.

                                  */ - imageManifestMediaType?: string; + lastObservedAt?: Date; /** - *

                                  The artifact media type of the image.

                                  + *

                                  An object that contains the details of a package vulnerability finding.

                                  */ - artifactMediaType?: string; -} + packageVulnerabilityDetails?: PackageVulnerabilityDetails; -export namespace ImageDetail { /** - * @internal + *

                                  An object that contains the details about how to remediate a finding.

                                  */ - export const filterSensitiveLog = (obj: ImageDetail): any => ({ - ...obj, - }); -} + remediation?: Remediation; -export interface DescribeImagesResponse { /** - *

                                  A list of ImageDetail objects that contain data about the - * image.

                                  + *

                                  Contains information on the resources involved in a finding.

                                  */ - imageDetails?: ImageDetail[]; + resources?: Resource[]; /** - *

                                  The nextToken value to include in a future DescribeImages - * request. When the results of a DescribeImages request exceed - * maxResults, this value can be used to retrieve the next page of - * results. This value is null when there are no more results to - * return.

                                  + *

                                  The Amazon Inspector score given to the finding.

                                  */ - nextToken?: string; -} + score?: number; -export namespace DescribeImagesResponse { /** - * @internal + *

                                  An object that contains details of the Amazon Inspector score.

                                  */ - export const filterSensitiveLog = (obj: DescribeImagesResponse): any => ({ - ...obj, - }); -} + scoreDetails?: ScoreDetails; -export interface DescribeImageScanFindingsRequest { /** - *

                                  The Amazon Web Services account ID associated with the registry that contains the repository in - * which to describe the image scan findings for. If you do not specify a registry, the default registry is assumed.

                                  + *

                                  The severity of the finding.

                                  */ - registryId?: string; + severity?: string; /** - *

                                  The repository for the image for which to describe the scan findings.

                                  + *

                                  The status of the finding.

                                  */ - repositoryName: string | undefined; + status?: string; /** - *

                                  An object with identifying information for an image in an Amazon ECR repository.

                                  + *

                                  The title of the finding.

                                  */ - imageId: ImageIdentifier | undefined; + title?: string; /** - *

                                  The nextToken value returned from a previous paginated - * DescribeImageScanFindings request where maxResults was - * used and the results exceeded the value of that parameter. Pagination continues from the - * end of the previous results that returned the nextToken value. This value - * is null when there are no more results to return.

                                  + *

                                  The type of the finding.

                                  */ - nextToken?: string; + type?: string; /** - *

                                  The maximum number of image scan results returned by - * DescribeImageScanFindings in paginated output. When this parameter is - * used, DescribeImageScanFindings only returns maxResults - * results in a single page along with a nextToken response element. The - * remaining results of the initial request can be seen by sending another - * DescribeImageScanFindings request with the returned - * nextToken value. This value can be between 1 and 1000. If this - * parameter is not used, then DescribeImageScanFindings returns up to 100 - * results and a nextToken value, if applicable.

                                  + *

                                  The date and time the finding was last updated at.

                                  */ - maxResults?: number; + updatedAt?: Date; } -export namespace DescribeImageScanFindingsRequest { +export namespace EnhancedImageScanFinding { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeImageScanFindingsRequest): any => ({ + export const filterSensitiveLog = (obj: EnhancedImageScanFinding): any => ({ ...obj, }); } @@ -1678,15 +2474,20 @@ export interface ImageScanFindings { */ vulnerabilitySourceUpdatedAt?: Date; + /** + *

+   * The image vulnerability counts, sorted by severity.
+   */
+  findingSeverityCounts?: { [key: string]: number };
+
   /**
    * The findings from the image scan.
    */
   findings?: ImageScanFinding[];

   /**
-   * The image vulnerability counts, sorted by severity.
+   * Details about the enhanced scan findings from Amazon Inspector.
    */
-  findingSeverityCounts?: { [key: string]: number };
+  enhancedFindings?: EnhancedImageScanFinding[];
 }

 export namespace ImageScanFindings {
@@ -1762,6 +2563,113 @@ export namespace ScanNotFoundException {
   });
 }
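With `findingSeverityCounts` and the new `enhancedFindings` list on `ImageScanFindings`, a caller can summarize a scan as sketched below; the repository name and tag are illustrative, and `enhancedFindings` is only populated when Amazon Inspector enhanced scanning is enabled for the registry:

```ts
import { ECRClient, DescribeImageScanFindingsCommand } from "@aws-sdk/client-ecr";

const client = new ECRClient({});

async function printScanSummary(repositoryName: string, imageTag: string) {
  const { imageScanStatus, imageScanFindings } = await client.send(
    new DescribeImageScanFindingsCommand({ repositoryName, imageId: { imageTag } })
  );

  // New ScanStatus values such as ACTIVE or PENDING can show up here.
  console.log(`Scan status: ${imageScanStatus?.status}`);
  // Severity counts are returned for both basic and enhanced scanning.
  console.log(imageScanFindings?.findingSeverityCounts);

  for (const finding of imageScanFindings?.enhancedFindings ?? []) {
    console.log(finding.severity, finding.title, finding.score);
  }
}

printScanSummary("my-repo", "latest").catch(console.error);
```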

+export interface DescribePullThroughCacheRulesRequest {
+  /**
+   * The Amazon Web Services account ID associated with the registry to return the pull through cache
+   * rules for. If you do not specify a registry, the default registry is assumed.
+   */
+  registryId?: string;
+
+  /**
+   * The Amazon ECR repository prefixes associated with the pull through cache rules to return.
+   * If no repository prefix value is specified, all pull through cache rules are
+   * returned.
+   */
+  ecrRepositoryPrefixes?: string[];
+
+  /**
+   * The nextToken value returned from a previous paginated
+   * DescribePullThroughCacheRulesRequest request where maxResults was
+   * used and the results exceeded the value of that parameter. Pagination continues from the
+   * end of the previous results that returned the nextToken value. This value is
+   * null when there are no more results to return.
+   */
+  nextToken?: string;
+
+  /**
+   * The maximum number of pull through cache rules returned by
+   * DescribePullThroughCacheRulesRequest in paginated output. When this
+   * parameter is used, DescribePullThroughCacheRulesRequest only returns
+   * maxResults results in a single page along with a nextToken
+   * response element. The remaining results of the initial request can be seen by sending
+   * another DescribePullThroughCacheRulesRequest request with the returned
+   * nextToken value. This value can be between 1 and 1000. If this
+   * parameter is not used, then DescribePullThroughCacheRulesRequest returns up
+   * to 100 results and a nextToken value, if applicable.
+   */
+  maxResults?: number;
+}
+
+export namespace DescribePullThroughCacheRulesRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DescribePullThroughCacheRulesRequest): any => ({
+    ...obj,
+  });
+}
+
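The `nextToken`/`maxResults` fields above are what `paginateDescribePullThroughCacheRules` (added under `src/pagination/` in this patch) drives. A sketch of iterating every rule, assuming the paginator is re-exported from the package root like the other ECR paginators:

```ts
import { ECRClient, paginateDescribePullThroughCacheRules } from "@aws-sdk/client-ecr";

const client = new ECRClient({});

async function listAllRules() {
  // pageSize maps onto maxResults for each underlying request.
  const paginator = paginateDescribePullThroughCacheRules({ client, pageSize: 100 }, {});
  for await (const page of paginator) {
    for (const rule of page.pullThroughCacheRules ?? []) {
      console.log(rule.ecrRepositoryPrefix, "->", rule.upstreamRegistryUrl);
    }
  }
}

listAllRules().catch(console.error);
```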

+/**
+ * The details of a pull through cache rule.
+ */
+export interface PullThroughCacheRule {
+  /**
+   * The Amazon ECR repository prefix associated with the pull through cache rule.
+   */
+  ecrRepositoryPrefix?: string;
+
+  /**
+   * The upstream registry URL associated with the pull through cache rule.
+   */
+  upstreamRegistryUrl?: string;
+
+  /**
+   * The date and time the pull through cache was created.
+   */
+  createdAt?: Date;
+
+  /**
+   * The Amazon Web Services account ID associated with the registry the pull through cache rule is
+   * associated with.
+   */
+  registryId?: string;
+}
+
+export namespace PullThroughCacheRule {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: PullThroughCacheRule): any => ({
+    ...obj,
+  });
+}
+
+export interface DescribePullThroughCacheRulesResponse {
+  /**
+   * The details of the pull through cache rules.
+   */
+  pullThroughCacheRules?: PullThroughCacheRule[];
+
+  /**
+   * The nextToken value to include in a future
+   * DescribePullThroughCacheRulesRequest request. When the results of a
+   * DescribePullThroughCacheRulesRequest request exceed
+   * maxResults, this value can be used to retrieve the next page of
+   * results. This value is null when there are no more results to return.
+   */
+  nextToken?: string;
+}
+
+export namespace DescribePullThroughCacheRulesResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DescribePullThroughCacheRulesResponse): any => ({
+    ...obj,
+  });
+}

 export interface DescribeRegistryRequest {}

 export namespace DescribeRegistryRequest {
@@ -2460,6 +3368,92 @@ export namespace GetRegistryPolicyResponse {
   });
 }

+export interface GetRegistryScanningConfigurationRequest {}
+
+export namespace GetRegistryScanningConfigurationRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: GetRegistryScanningConfigurationRequest): any => ({
+    ...obj,
+  });
+}
+

+/**
+ * The details of a scanning rule for a private registry.
+ */
+export interface RegistryScanningRule {
+  /**
+   * The frequency that scans are performed at for a private registry.
+   */
+  scanFrequency: ScanFrequency | string | undefined;
+
+  /**
+   * The repository filters associated with the scanning configuration for a private
+   * registry.
+   */
+  repositoryFilters: ScanningRepositoryFilter[] | undefined;
+}
+
+export namespace RegistryScanningRule {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: RegistryScanningRule): any => ({
+    ...obj,
+  });
+}
+
+export enum ScanType {
+  BASIC = "BASIC",
+  ENHANCED = "ENHANCED",
+}
+
+/**
+ * The scanning configuration for a private registry.
+ */
+export interface RegistryScanningConfiguration {
+  /**
+   * The type of scanning configured for the registry.
+   */
+  scanType?: ScanType | string;
+
+  /**
+   * The scanning rules associated with the registry.
+   */
+  rules?: RegistryScanningRule[];
+}
+
+export namespace RegistryScanningConfiguration {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: RegistryScanningConfiguration): any => ({
+    ...obj,
+  });
+}
+
+export interface GetRegistryScanningConfigurationResponse {
+  /**
+   * The ID of the registry.
+   */
+  registryId?: string;
+
+  /**
+   * The scanning configuration for the registry.
+   */
+  scanningConfiguration?: RegistryScanningConfiguration;
+}
+
+export namespace GetRegistryScanningConfigurationResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: GetRegistryScanningConfigurationResponse): any => ({
+    ...obj,
+  });
+}
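A sketch that switches a private registry to the `ENHANCED` scan type and reads the configuration back with the shapes above. The `repositoryFilters` entry and the `CONTINUOUS_SCAN` frequency are assumptions here: `ScanningRepositoryFilter` and `ScanFrequency` are referenced by `RegistryScanningRule` but are defined in hunks not shown in this section:

```ts
import {
  ECRClient,
  GetRegistryScanningConfigurationCommand,
  PutRegistryScanningConfigurationCommand,
} from "@aws-sdk/client-ecr";

const client = new ECRClient({});

async function enableEnhancedScanning() {
  // Turn on Amazon Inspector enhanced scanning for every repository in the registry.
  // The filter shape ({ filter, filterType }) is assumed, not taken from this hunk.
  await client.send(
    new PutRegistryScanningConfigurationCommand({
      scanType: "ENHANCED",
      rules: [
        {
          scanFrequency: "CONTINUOUS_SCAN",
          repositoryFilters: [{ filter: "*", filterType: "WILDCARD" }],
        },
      ],
    })
  );

  // Read the configuration back to confirm what is now in effect.
  const { scanningConfiguration } = await client.send(
    new GetRegistryScanningConfigurationCommand({})
  );
  console.log(scanningConfiguration?.scanType, scanningConfiguration?.rules);
}

enableEnhancedScanning().catch(console.error);
```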

 export interface GetRepositoryPolicyRequest {
   /**
    * The Amazon Web Services account ID associated with the registry that contains the repository.
@@ -3029,6 +4023,50 @@ export namespace PutRegistryPolicyResponse {
   });
 }

+export interface PutRegistryScanningConfigurationRequest {
+  /**
+   * The scanning type to set for the registry.
+   *
+   * By default, the BASIC scan type is used. When basic scanning is set, you
+   * may specify filters to determine which individual repositories, or all repositories, are
+   * scanned when new images are pushed. Alternatively, you can do manual scans of images
+   * with basic scanning.
+   *
+   * When the ENHANCED scan type is set, Amazon Inspector provides automated, continuous
+   * scanning of all repositories in your registry.
+   */
+  scanType?: ScanType | string;
+
+  /**
+   * The scanning rules to use for the registry. A scanning rule is used to determine which
+   * repository filters are used and at what frequency scanning will occur.
+   */
+  rules?: RegistryScanningRule[];
+}
+
+export namespace PutRegistryScanningConfigurationRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: PutRegistryScanningConfigurationRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface PutRegistryScanningConfigurationResponse {
+  /**
+   * The scanning configuration for your registry.
+   */
+  registryScanningConfiguration?: RegistryScanningConfiguration;
+}
+
+export namespace PutRegistryScanningConfigurationResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: PutRegistryScanningConfigurationResponse): any => ({
+    ...obj,
+  });
+}

 export interface PutReplicationConfigurationRequest {
   /**

                                  An object representing the replication configuration for a registry.

                                  diff --git a/clients/client-ecr/src/pagination/DescribePullThroughCacheRulesPaginator.ts b/clients/client-ecr/src/pagination/DescribePullThroughCacheRulesPaginator.ts new file mode 100644 index 000000000000..89ad7da049d3 --- /dev/null +++ b/clients/client-ecr/src/pagination/DescribePullThroughCacheRulesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + DescribePullThroughCacheRulesCommand, + DescribePullThroughCacheRulesCommandInput, + DescribePullThroughCacheRulesCommandOutput, +} from "../commands/DescribePullThroughCacheRulesCommand"; +import { ECR } from "../ECR"; +import { ECRClient } from "../ECRClient"; +import { ECRPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: ECRClient, + input: DescribePullThroughCacheRulesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new DescribePullThroughCacheRulesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: ECR, + input: DescribePullThroughCacheRulesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.describePullThroughCacheRules(input, ...args); +}; +export async function* paginateDescribePullThroughCacheRules( + config: ECRPaginationConfiguration, + input: DescribePullThroughCacheRulesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: DescribePullThroughCacheRulesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof ECR) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof ECRClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected ECR | ECRClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-ecr/src/pagination/index.ts b/clients/client-ecr/src/pagination/index.ts index 71d3e6230d26..3718c2f039ef 100644 --- a/clients/client-ecr/src/pagination/index.ts +++ b/clients/client-ecr/src/pagination/index.ts @@ -1,5 +1,6 @@ export * from "./DescribeImageScanFindingsPaginator"; export * from "./DescribeImagesPaginator"; +export * from "./DescribePullThroughCacheRulesPaginator"; export * from "./DescribeRepositoriesPaginator"; export * from "./GetLifecyclePolicyPreviewPaginator"; export * from "./Interfaces"; diff --git a/clients/client-ecr/src/protocols/Aws_json1_1.ts b/clients/client-ecr/src/protocols/Aws_json1_1.ts index 568280dfe774..d9bbee34986b 100644 --- a/clients/client-ecr/src/protocols/Aws_json1_1.ts +++ b/clients/client-ecr/src/protocols/Aws_json1_1.ts @@ -6,6 +6,7 @@ import { expectNonNull as __expectNonNull, expectNumber as __expectNumber, expectString as __expectString, + limitedParseDouble as __limitedParseDouble, parseEpochTimestamp as __parseEpochTimestamp, } from "@aws-sdk/smithy-client"; import { @@ -23,15 +24,27 @@ import { } from "../commands/BatchCheckLayerAvailabilityCommand"; import { BatchDeleteImageCommandInput, BatchDeleteImageCommandOutput } from "../commands/BatchDeleteImageCommand"; import { BatchGetImageCommandInput, BatchGetImageCommandOutput } 
from "../commands/BatchGetImageCommand"; +import { + BatchGetRepositoryScanningConfigurationCommandInput, + BatchGetRepositoryScanningConfigurationCommandOutput, +} from "../commands/BatchGetRepositoryScanningConfigurationCommand"; import { CompleteLayerUploadCommandInput, CompleteLayerUploadCommandOutput, } from "../commands/CompleteLayerUploadCommand"; +import { + CreatePullThroughCacheRuleCommandInput, + CreatePullThroughCacheRuleCommandOutput, +} from "../commands/CreatePullThroughCacheRuleCommand"; import { CreateRepositoryCommandInput, CreateRepositoryCommandOutput } from "../commands/CreateRepositoryCommand"; import { DeleteLifecyclePolicyCommandInput, DeleteLifecyclePolicyCommandOutput, } from "../commands/DeleteLifecyclePolicyCommand"; +import { + DeletePullThroughCacheRuleCommandInput, + DeletePullThroughCacheRuleCommandOutput, +} from "../commands/DeletePullThroughCacheRuleCommand"; import { DeleteRegistryPolicyCommandInput, DeleteRegistryPolicyCommandOutput, @@ -50,6 +63,10 @@ import { DescribeImageScanFindingsCommandOutput, } from "../commands/DescribeImageScanFindingsCommand"; import { DescribeImagesCommandInput, DescribeImagesCommandOutput } from "../commands/DescribeImagesCommand"; +import { + DescribePullThroughCacheRulesCommandInput, + DescribePullThroughCacheRulesCommandOutput, +} from "../commands/DescribePullThroughCacheRulesCommand"; import { DescribeRegistryCommandInput, DescribeRegistryCommandOutput } from "../commands/DescribeRegistryCommand"; import { DescribeRepositoriesCommandInput, @@ -69,6 +86,10 @@ import { GetLifecyclePolicyPreviewCommandOutput, } from "../commands/GetLifecyclePolicyPreviewCommand"; import { GetRegistryPolicyCommandInput, GetRegistryPolicyCommandOutput } from "../commands/GetRegistryPolicyCommand"; +import { + GetRegistryScanningConfigurationCommandInput, + GetRegistryScanningConfigurationCommandOutput, +} from "../commands/GetRegistryScanningConfigurationCommand"; import { GetRepositoryPolicyCommandInput, GetRepositoryPolicyCommandOutput, @@ -93,6 +114,10 @@ import { } from "../commands/PutImageTagMutabilityCommand"; import { PutLifecyclePolicyCommandInput, PutLifecyclePolicyCommandOutput } from "../commands/PutLifecyclePolicyCommand"; import { PutRegistryPolicyCommandInput, PutRegistryPolicyCommandOutput } from "../commands/PutRegistryPolicyCommand"; +import { + PutRegistryScanningConfigurationCommandInput, + PutRegistryScanningConfigurationCommandOutput, +} from "../commands/PutRegistryScanningConfigurationCommand"; import { PutReplicationConfigurationCommandInput, PutReplicationConfigurationCommandOutput, @@ -112,18 +137,28 @@ import { UploadLayerPartCommandInput, UploadLayerPartCommandOutput } from "../co import { Attribute, AuthorizationData, + AwsEcrContainerImageDetails, BatchCheckLayerAvailabilityRequest, BatchCheckLayerAvailabilityResponse, BatchDeleteImageRequest, BatchDeleteImageResponse, BatchGetImageRequest, BatchGetImageResponse, + BatchGetRepositoryScanningConfigurationRequest, + BatchGetRepositoryScanningConfigurationResponse, CompleteLayerUploadRequest, CompleteLayerUploadResponse, + CreatePullThroughCacheRuleRequest, + CreatePullThroughCacheRuleResponse, CreateRepositoryRequest, CreateRepositoryResponse, + CvssScore, + CvssScoreAdjustment, + CvssScoreDetails, DeleteLifecyclePolicyRequest, DeleteLifecyclePolicyResponse, + DeletePullThroughCacheRuleRequest, + DeletePullThroughCacheRuleResponse, DeleteRegistryPolicyRequest, DeleteRegistryPolicyResponse, DeleteRepositoryPolicyRequest, @@ -137,12 +172,15 @@ import { 
DescribeImagesFilter, DescribeImagesRequest, DescribeImagesResponse, + DescribePullThroughCacheRulesRequest, + DescribePullThroughCacheRulesResponse, DescribeRegistryRequest, DescribeRegistryResponse, DescribeRepositoriesRequest, DescribeRepositoriesResponse, EmptyUploadException, EncryptionConfiguration, + EnhancedImageScanFinding, FindingSeverity, GetAuthorizationTokenRequest, GetAuthorizationTokenResponse, @@ -154,6 +192,8 @@ import { GetLifecyclePolicyResponse, GetRegistryPolicyRequest, GetRegistryPolicyResponse, + GetRegistryScanningConfigurationRequest, + GetRegistryScanningConfigurationResponse, GetRepositoryPolicyRequest, GetRepositoryPolicyResponse, Image, @@ -196,6 +236,10 @@ import { ListImagesResponse, ListTagsForResourceRequest, ListTagsForResourceResponse, + PackageVulnerabilityDetails, + PullThroughCacheRule, + PullThroughCacheRuleAlreadyExistsException, + PullThroughCacheRuleNotFoundException, PutImageRequest, PutImageResponse, PutImageScanningConfigurationRequest, @@ -206,10 +250,16 @@ import { PutLifecyclePolicyResponse, PutRegistryPolicyRequest, PutRegistryPolicyResponse, + PutRegistryScanningConfigurationRequest, + PutRegistryScanningConfigurationResponse, PutReplicationConfigurationRequest, PutReplicationConfigurationResponse, + Recommendation, ReferencedImagesNotFoundException, RegistryPolicyNotFoundException, + RegistryScanningConfiguration, + RegistryScanningRule, + Remediation, ReplicationConfiguration, ReplicationDestination, ReplicationRule, @@ -219,7 +269,13 @@ import { RepositoryNotEmptyException, RepositoryNotFoundException, RepositoryPolicyNotFoundException, + RepositoryScanningConfiguration, + RepositoryScanningConfigurationFailure, + Resource, + ResourceDetails, + ScanningRepositoryFilter, ScanNotFoundException, + ScoreDetails, ServerException, SetRepositoryPolicyRequest, SetRepositoryPolicyResponse, @@ -232,12 +288,14 @@ import { TagResourceResponse, TooManyTagsException, UnsupportedImageTypeException, + UnsupportedUpstreamRegistryException, UntagResourceRequest, UntagResourceResponse, UploadLayerPartRequest, UploadLayerPartResponse, UploadNotFoundException, ValidationException, + VulnerablePackage, } from "../models/models_0"; export const serializeAws_json1_1BatchCheckLayerAvailabilityCommand = async ( @@ -279,6 +337,19 @@ export const serializeAws_json1_1BatchGetImageCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1BatchGetRepositoryScanningConfigurationCommand = async ( + input: BatchGetRepositoryScanningConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AmazonEC2ContainerRegistry_V20150921.BatchGetRepositoryScanningConfiguration", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1BatchGetRepositoryScanningConfigurationRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1CompleteLayerUploadCommand = async ( input: CompleteLayerUploadCommandInput, context: __SerdeContext @@ -292,6 +363,19 @@ export const serializeAws_json1_1CompleteLayerUploadCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1CreatePullThroughCacheRuleCommand = async ( + input: CreatePullThroughCacheRuleCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + 
"content-type": "application/x-amz-json-1.1", + "x-amz-target": "AmazonEC2ContainerRegistry_V20150921.CreatePullThroughCacheRule", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1CreatePullThroughCacheRuleRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1CreateRepositoryCommand = async ( input: CreateRepositoryCommandInput, context: __SerdeContext @@ -318,6 +402,19 @@ export const serializeAws_json1_1DeleteLifecyclePolicyCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DeletePullThroughCacheRuleCommand = async ( + input: DeletePullThroughCacheRuleCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AmazonEC2ContainerRegistry_V20150921.DeletePullThroughCacheRule", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeletePullThroughCacheRuleRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DeleteRegistryPolicyCommand = async ( input: DeleteRegistryPolicyCommandInput, context: __SerdeContext @@ -396,6 +493,19 @@ export const serializeAws_json1_1DescribeImageScanFindingsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DescribePullThroughCacheRulesCommand = async ( + input: DescribePullThroughCacheRulesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AmazonEC2ContainerRegistry_V20150921.DescribePullThroughCacheRules", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DescribePullThroughCacheRulesRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeRegistryCommand = async ( input: DescribeRegistryCommandInput, context: __SerdeContext @@ -487,6 +597,19 @@ export const serializeAws_json1_1GetRegistryPolicyCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1GetRegistryScanningConfigurationCommand = async ( + input: GetRegistryScanningConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AmazonEC2ContainerRegistry_V20150921.GetRegistryScanningConfiguration", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1GetRegistryScanningConfigurationRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1GetRepositoryPolicyCommand = async ( input: GetRepositoryPolicyCommandInput, context: __SerdeContext @@ -604,6 +727,19 @@ export const serializeAws_json1_1PutRegistryPolicyCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1PutRegistryScanningConfigurationCommand = async ( + input: PutRegistryScanningConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AmazonEC2ContainerRegistry_V20150921.PutRegistryScanningConfiguration", + }; + let 
body: any; + body = JSON.stringify(serializeAws_json1_1PutRegistryScanningConfigurationRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1PutReplicationConfigurationCommand = async ( input: PutReplicationConfigurationCommandInput, context: __SerdeContext @@ -905,6 +1041,84 @@ const deserializeAws_json1_1BatchGetImageCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1BatchGetRepositoryScanningConfigurationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1BatchGetRepositoryScanningConfigurationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1BatchGetRepositoryScanningConfigurationResponse(data, context); + const response: BatchGetRepositoryScanningConfigurationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1BatchGetRepositoryScanningConfigurationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidParameterException": + case "com.amazonaws.ecr#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "RepositoryNotFoundException": + case "com.amazonaws.ecr#RepositoryNotFoundException": + response = { + ...(await deserializeAws_json1_1RepositoryNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServerException": + case "com.amazonaws.ecr#ServerException": + response = { + ...(await deserializeAws_json1_1ServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1CompleteLayerUploadCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1023,6 +1237,100 @@ const deserializeAws_json1_1CompleteLayerUploadCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1CreatePullThroughCacheRuleCommand = async ( + 
output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1CreatePullThroughCacheRuleCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1CreatePullThroughCacheRuleResponse(data, context); + const response: CreatePullThroughCacheRuleCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1CreatePullThroughCacheRuleCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidParameterException": + case "com.amazonaws.ecr#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "LimitExceededException": + case "com.amazonaws.ecr#LimitExceededException": + response = { + ...(await deserializeAws_json1_1LimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "PullThroughCacheRuleAlreadyExistsException": + case "com.amazonaws.ecr#PullThroughCacheRuleAlreadyExistsException": + response = { + ...(await deserializeAws_json1_1PullThroughCacheRuleAlreadyExistsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServerException": + case "com.amazonaws.ecr#ServerException": + response = { + ...(await deserializeAws_json1_1ServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedUpstreamRegistryException": + case "com.amazonaws.ecr#UnsupportedUpstreamRegistryException": + response = { + ...(await deserializeAws_json1_1UnsupportedUpstreamRegistryExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1CreateRepositoryCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1203,27 +1511,27 @@ const deserializeAws_json1_1DeleteLifecyclePolicyCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DeleteRegistryPolicyCommand = async ( +export const 
deserializeAws_json1_1DeletePullThroughCacheRuleCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DeleteRegistryPolicyCommandError(output, context); + return deserializeAws_json1_1DeletePullThroughCacheRuleCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DeleteRegistryPolicyResponse(data, context); - const response: DeleteRegistryPolicyCommandOutput = { + contents = deserializeAws_json1_1DeletePullThroughCacheRuleResponse(data, context); + const response: DeletePullThroughCacheRuleCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DeleteRegistryPolicyCommandError = async ( +const deserializeAws_json1_1DeletePullThroughCacheRuleCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1240,10 +1548,10 @@ const deserializeAws_json1_1DeleteRegistryPolicyCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "RegistryPolicyNotFoundException": - case "com.amazonaws.ecr#RegistryPolicyNotFoundException": + case "PullThroughCacheRuleNotFoundException": + case "com.amazonaws.ecr#PullThroughCacheRuleNotFoundException": response = { - ...(await deserializeAws_json1_1RegistryPolicyNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1PullThroughCacheRuleNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -1281,27 +1589,27 @@ const deserializeAws_json1_1DeleteRegistryPolicyCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DeleteRepositoryCommand = async ( +export const deserializeAws_json1_1DeleteRegistryPolicyCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DeleteRepositoryCommandError(output, context); + return deserializeAws_json1_1DeleteRegistryPolicyCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DeleteRepositoryResponse(data, context); - const response: DeleteRepositoryCommandOutput = { + contents = deserializeAws_json1_1DeleteRegistryPolicyResponse(data, context); + const response: DeleteRegistryPolicyCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DeleteRepositoryCommandError = async ( +const deserializeAws_json1_1DeleteRegistryPolicyCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1318,32 +1626,110 @@ const deserializeAws_json1_1DeleteRepositoryCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "KmsException": - case "com.amazonaws.ecr#KmsException": + case "RegistryPolicyNotFoundException": + case "com.amazonaws.ecr#RegistryPolicyNotFoundException": response = { - ...(await deserializeAws_json1_1KmsExceptionResponse(parsedOutput, context)), + ...(await 
deserializeAws_json1_1RegistryPolicyNotFoundExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "RepositoryNotEmptyException": - case "com.amazonaws.ecr#RepositoryNotEmptyException": + case "ServerException": + case "com.amazonaws.ecr#ServerException": response = { - ...(await deserializeAws_json1_1RepositoryNotEmptyExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ServerExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "RepositoryNotFoundException": - case "com.amazonaws.ecr#RepositoryNotFoundException": + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": response = { - ...(await deserializeAws_json1_1RepositoryNotFoundExceptionResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "ServerException": - case "com.amazonaws.ecr#ServerException": + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1DeleteRepositoryCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeleteRepositoryCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DeleteRepositoryResponse(data, context); + const response: DeleteRepositoryCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DeleteRepositoryCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidParameterException": + case "com.amazonaws.ecr#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "KmsException": + case "com.amazonaws.ecr#KmsException": + response = { + ...(await deserializeAws_json1_1KmsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "RepositoryNotEmptyException": + case "com.amazonaws.ecr#RepositoryNotEmptyException": + response = { + ...(await deserializeAws_json1_1RepositoryNotEmptyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "RepositoryNotFoundException": + case "com.amazonaws.ecr#RepositoryNotFoundException": + response = { + ...(await 
deserializeAws_json1_1RepositoryNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServerException": + case "com.amazonaws.ecr#ServerException": response = { ...(await deserializeAws_json1_1ServerExceptionResponse(parsedOutput, context)), name: errorCode, @@ -1678,6 +2064,92 @@ const deserializeAws_json1_1DescribeImageScanFindingsCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1DescribePullThroughCacheRulesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DescribePullThroughCacheRulesCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DescribePullThroughCacheRulesResponse(data, context); + const response: DescribePullThroughCacheRulesCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DescribePullThroughCacheRulesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidParameterException": + case "com.amazonaws.ecr#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "PullThroughCacheRuleNotFoundException": + case "com.amazonaws.ecr#PullThroughCacheRuleNotFoundException": + response = { + ...(await deserializeAws_json1_1PullThroughCacheRuleNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServerException": + case "com.amazonaws.ecr#ServerException": + response = { + ...(await deserializeAws_json1_1ServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -2217,6 +2689,76 @@ const 
deserializeAws_json1_1GetRegistryPolicyCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1GetRegistryScanningConfigurationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1GetRegistryScanningConfigurationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1GetRegistryScanningConfigurationResponse(data, context); + const response: GetRegistryScanningConfigurationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1GetRegistryScanningConfigurationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidParameterException": + case "com.amazonaws.ecr#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServerException": + case "com.amazonaws.ecr#ServerException": + response = { + ...(await deserializeAws_json1_1ServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1GetRepositoryPolicyCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2692,6 +3234,14 @@ const deserializeAws_json1_1PutImageScanningConfigurationCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -2919,27 +3469,27 @@ const deserializeAws_json1_1PutRegistryPolicyCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1PutReplicationConfigurationCommand = async ( +export const deserializeAws_json1_1PutRegistryScanningConfigurationCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if 
(output.statusCode >= 300) { - return deserializeAws_json1_1PutReplicationConfigurationCommandError(output, context); + return deserializeAws_json1_1PutRegistryScanningConfigurationCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1PutReplicationConfigurationResponse(data, context); - const response: PutReplicationConfigurationCommandOutput = { + contents = deserializeAws_json1_1PutRegistryScanningConfigurationResponse(data, context); + const response: PutRegistryScanningConfigurationCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1PutReplicationConfigurationCommandError = async ( +const deserializeAws_json1_1PutRegistryScanningConfigurationCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2989,27 +3539,27 @@ const deserializeAws_json1_1PutReplicationConfigurationCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1SetRepositoryPolicyCommand = async ( +export const deserializeAws_json1_1PutReplicationConfigurationCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1SetRepositoryPolicyCommandError(output, context); + return deserializeAws_json1_1PutReplicationConfigurationCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1SetRepositoryPolicyResponse(data, context); - const response: SetRepositoryPolicyCommandOutput = { + contents = deserializeAws_json1_1PutReplicationConfigurationResponse(data, context); + const response: PutReplicationConfigurationCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1SetRepositoryPolicyCommandError = async ( +const deserializeAws_json1_1PutReplicationConfigurationCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -3026,14 +3576,84 @@ const deserializeAws_json1_1SetRepositoryPolicyCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "RepositoryNotFoundException": - case "com.amazonaws.ecr#RepositoryNotFoundException": - response = { - ...(await deserializeAws_json1_1RepositoryNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; + case "ServerException": + case "com.amazonaws.ecr#ServerException": + response = { + ...(await deserializeAws_json1_1ServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || 
parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1SetRepositoryPolicyCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1SetRepositoryPolicyCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1SetRepositoryPolicyResponse(data, context); + const response: SetRepositoryPolicyCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1SetRepositoryPolicyCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidParameterException": + case "com.amazonaws.ecr#InvalidParameterException": + response = { + ...(await deserializeAws_json1_1InvalidParameterExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "RepositoryNotFoundException": + case "com.amazonaws.ecr#RepositoryNotFoundException": + response = { + ...(await deserializeAws_json1_1RepositoryNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "ServerException": case "com.amazonaws.ecr#ServerException": response = { @@ -3136,6 +3756,14 @@ const deserializeAws_json1_1StartImageScanCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ValidationException": + case "com.amazonaws.ecr#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -3783,6 +4411,36 @@ const deserializeAws_json1_1LimitExceededExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1PullThroughCacheRuleAlreadyExistsExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1PullThroughCacheRuleAlreadyExistsException(body, context); + const contents: PullThroughCacheRuleAlreadyExistsException = { + name: "PullThroughCacheRuleAlreadyExistsException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1PullThroughCacheRuleNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1PullThroughCacheRuleNotFoundException(body, context); + const contents: PullThroughCacheRuleNotFoundException = { + name: "PullThroughCacheRuleNotFoundException", + $fault: "client", + $metadata: 
deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1ReferencedImagesNotFoundExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -3933,6 +4591,21 @@ const deserializeAws_json1_1UnsupportedImageTypeExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1UnsupportedUpstreamRegistryExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1UnsupportedUpstreamRegistryException(body, context); + const contents: UnsupportedUpstreamRegistryException = { + name: "UnsupportedUpstreamRegistryException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1UploadNotFoundExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -4013,6 +4686,18 @@ const serializeAws_json1_1BatchGetImageRequest = (input: BatchGetImageRequest, c }; }; +const serializeAws_json1_1BatchGetRepositoryScanningConfigurationRequest = ( + input: BatchGetRepositoryScanningConfigurationRequest, + context: __SerdeContext +): any => { + return { + ...(input.repositoryNames !== undefined && + input.repositoryNames !== null && { + repositoryNames: serializeAws_json1_1ScanningConfigurationRepositoryNameList(input.repositoryNames, context), + }), + }; +}; + const serializeAws_json1_1CompleteLayerUploadRequest = ( input: CompleteLayerUploadRequest, context: __SerdeContext @@ -4029,6 +4714,19 @@ const serializeAws_json1_1CompleteLayerUploadRequest = ( }; }; +const serializeAws_json1_1CreatePullThroughCacheRuleRequest = ( + input: CreatePullThroughCacheRuleRequest, + context: __SerdeContext +): any => { + return { + ...(input.ecrRepositoryPrefix !== undefined && + input.ecrRepositoryPrefix !== null && { ecrRepositoryPrefix: input.ecrRepositoryPrefix }), + ...(input.registryId !== undefined && input.registryId !== null && { registryId: input.registryId }), + ...(input.upstreamRegistryUrl !== undefined && + input.upstreamRegistryUrl !== null && { upstreamRegistryUrl: input.upstreamRegistryUrl }), + }; +}; + const serializeAws_json1_1CreateRepositoryRequest = (input: CreateRepositoryRequest, context: __SerdeContext): any => { return { ...(input.encryptionConfiguration !== undefined && @@ -4062,6 +4760,17 @@ const serializeAws_json1_1DeleteLifecyclePolicyRequest = ( }; }; +const serializeAws_json1_1DeletePullThroughCacheRuleRequest = ( + input: DeletePullThroughCacheRuleRequest, + context: __SerdeContext +): any => { + return { + ...(input.ecrRepositoryPrefix !== undefined && + input.ecrRepositoryPrefix !== null && { ecrRepositoryPrefix: input.ecrRepositoryPrefix }), + ...(input.registryId !== undefined && input.registryId !== null && { registryId: input.registryId }), + }; +}; + const serializeAws_json1_1DeleteRegistryPolicyRequest = ( input: DeleteRegistryPolicyRequest, context: __SerdeContext @@ -4137,6 +4846,24 @@ const serializeAws_json1_1DescribeImagesRequest = (input: DescribeImagesRequest, }; }; +const serializeAws_json1_1DescribePullThroughCacheRulesRequest = ( + input: DescribePullThroughCacheRulesRequest, + context: __SerdeContext +): any => { + return { + ...(input.ecrRepositoryPrefixes !== undefined && + input.ecrRepositoryPrefixes !== null && { + ecrRepositoryPrefixes: serializeAws_json1_1PullThroughCacheRuleRepositoryPrefixList( + input.ecrRepositoryPrefixes, + context + ), + }), + ...(input.maxResults !== 
undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + ...(input.registryId !== undefined && input.registryId !== null && { registryId: input.registryId }), + }; +}; + const serializeAws_json1_1DescribeRegistryRequest = (input: DescribeRegistryRequest, context: __SerdeContext): any => { return {}; }; @@ -4234,6 +4961,13 @@ const serializeAws_json1_1GetRegistryPolicyRequest = ( return {}; }; +const serializeAws_json1_1GetRegistryScanningConfigurationRequest = ( + input: GetRegistryScanningConfigurationRequest, + context: __SerdeContext +): any => { + return {}; +}; + const serializeAws_json1_1GetRepositoryPolicyRequest = ( input: GetRepositoryPolicyRequest, context: __SerdeContext @@ -4341,6 +5075,20 @@ const serializeAws_json1_1MediaTypeList = (input: string[], context: __SerdeCont }); }; +const serializeAws_json1_1PullThroughCacheRuleRepositoryPrefixList = ( + input: string[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_json1_1PutImageRequest = (input: PutImageRequest, context: __SerdeContext): any => { return { ...(input.imageDigest !== undefined && input.imageDigest !== null && { imageDigest: input.imageDigest }), @@ -4407,6 +5155,17 @@ const serializeAws_json1_1PutRegistryPolicyRequest = ( }; }; +const serializeAws_json1_1PutRegistryScanningConfigurationRequest = ( + input: PutRegistryScanningConfigurationRequest, + context: __SerdeContext +): any => { + return { + ...(input.rules !== undefined && + input.rules !== null && { rules: serializeAws_json1_1RegistryScanningRuleList(input.rules, context) }), + ...(input.scanType !== undefined && input.scanType !== null && { scanType: input.scanType }), + }; +}; + const serializeAws_json1_1PutReplicationConfigurationRequest = ( input: PutReplicationConfigurationRequest, context: __SerdeContext @@ -4419,6 +5178,27 @@ const serializeAws_json1_1PutReplicationConfigurationRequest = ( }; }; +const serializeAws_json1_1RegistryScanningRule = (input: RegistryScanningRule, context: __SerdeContext): any => { + return { + ...(input.repositoryFilters !== undefined && + input.repositoryFilters !== null && { + repositoryFilters: serializeAws_json1_1ScanningRepositoryFilterList(input.repositoryFilters, context), + }), + ...(input.scanFrequency !== undefined && input.scanFrequency !== null && { scanFrequency: input.scanFrequency }), + }; +}; + +const serializeAws_json1_1RegistryScanningRuleList = (input: RegistryScanningRule[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1RegistryScanningRule(entry, context); + }); +}; + const serializeAws_json1_1ReplicationConfiguration = ( input: ReplicationConfiguration, context: __SerdeContext @@ -4503,6 +5283,41 @@ const serializeAws_json1_1RepositoryNameList = (input: string[], context: __Serd }); }; +const serializeAws_json1_1ScanningConfigurationRepositoryNameList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_1ScanningRepositoryFilter = ( + input: ScanningRepositoryFilter, + context: __SerdeContext +): any => { + return { + 
...(input.filter !== undefined && input.filter !== null && { filter: input.filter }), + ...(input.filterType !== undefined && input.filterType !== null && { filterType: input.filterType }), + }; +}; + +const serializeAws_json1_1ScanningRepositoryFilterList = ( + input: ScanningRepositoryFilter[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1ScanningRepositoryFilter(entry, context); + }); +}; + const serializeAws_json1_1SetRepositoryPolicyRequest = ( input: SetRepositoryPolicyRequest, context: __SerdeContext @@ -4636,6 +5451,28 @@ const deserializeAws_json1_1AuthorizationDataList = (output: any, context: __Ser }); }; +const deserializeAws_json1_1AwsEcrContainerImageDetails = ( + output: any, + context: __SerdeContext +): AwsEcrContainerImageDetails => { + return { + architecture: __expectString(output.architecture), + author: __expectString(output.author), + imageHash: __expectString(output.imageHash), + imageTags: + output.imageTags !== undefined && output.imageTags !== null + ? deserializeAws_json1_1ImageTagsList(output.imageTags, context) + : undefined, + platform: __expectString(output.platform), + pushedAt: + output.pushedAt !== undefined && output.pushedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.pushedAt))) + : undefined, + registry: __expectString(output.registry), + repositoryName: __expectString(output.repositoryName), + } as any; +}; + const deserializeAws_json1_1BatchCheckLayerAvailabilityResponse = ( output: any, context: __SerdeContext @@ -4681,6 +5518,22 @@ const deserializeAws_json1_1BatchGetImageResponse = (output: any, context: __Ser } as any; }; +const deserializeAws_json1_1BatchGetRepositoryScanningConfigurationResponse = ( + output: any, + context: __SerdeContext +): BatchGetRepositoryScanningConfigurationResponse => { + return { + failures: + output.failures !== undefined && output.failures !== null + ? deserializeAws_json1_1RepositoryScanningConfigurationFailureList(output.failures, context) + : undefined, + scanningConfigurations: + output.scanningConfigurations !== undefined && output.scanningConfigurations !== null + ? deserializeAws_json1_1RepositoryScanningConfigurationList(output.scanningConfigurations, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1CompleteLayerUploadResponse = ( output: any, context: __SerdeContext @@ -4693,6 +5546,21 @@ const deserializeAws_json1_1CompleteLayerUploadResponse = ( } as any; }; +const deserializeAws_json1_1CreatePullThroughCacheRuleResponse = ( + output: any, + context: __SerdeContext +): CreatePullThroughCacheRuleResponse => { + return { + createdAt: + output.createdAt !== undefined && output.createdAt !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdAt))) + : undefined, + ecrRepositoryPrefix: __expectString(output.ecrRepositoryPrefix), + registryId: __expectString(output.registryId), + upstreamRegistryUrl: __expectString(output.upstreamRegistryUrl), + } as any; +}; + const deserializeAws_json1_1CreateRepositoryResponse = ( output: any, context: __SerdeContext @@ -4705,10 +5573,61 @@ const deserializeAws_json1_1CreateRepositoryResponse = ( } as any; }; -const deserializeAws_json1_1DeleteLifecyclePolicyResponse = ( - output: any, - context: __SerdeContext -): DeleteLifecyclePolicyResponse => { +const deserializeAws_json1_1CvssScore = (output: any, context: __SerdeContext): CvssScore => { + return { + baseScore: __limitedParseDouble(output.baseScore), + scoringVector: __expectString(output.scoringVector), + source: __expectString(output.source), + version: __expectString(output.version), + } as any; +}; + +const deserializeAws_json1_1CvssScoreAdjustment = (output: any, context: __SerdeContext): CvssScoreAdjustment => { + return { + metric: __expectString(output.metric), + reason: __expectString(output.reason), + } as any; +}; + +const deserializeAws_json1_1CvssScoreAdjustmentList = (output: any, context: __SerdeContext): CvssScoreAdjustment[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1CvssScoreAdjustment(entry, context); + }); +}; + +const deserializeAws_json1_1CvssScoreDetails = (output: any, context: __SerdeContext): CvssScoreDetails => { + return { + adjustments: + output.adjustments !== undefined && output.adjustments !== null + ? deserializeAws_json1_1CvssScoreAdjustmentList(output.adjustments, context) + : undefined, + score: __limitedParseDouble(output.score), + scoreSource: __expectString(output.scoreSource), + scoringVector: __expectString(output.scoringVector), + version: __expectString(output.version), + } as any; +}; + +const deserializeAws_json1_1CvssScoreList = (output: any, context: __SerdeContext): CvssScore[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1CvssScore(entry, context); + }); +}; + +const deserializeAws_json1_1DeleteLifecyclePolicyResponse = ( + output: any, + context: __SerdeContext +): DeleteLifecyclePolicyResponse => { return { lastEvaluatedAt: output.lastEvaluatedAt !== undefined && output.lastEvaluatedAt !== null @@ -4720,6 +5639,21 @@ const deserializeAws_json1_1DeleteLifecyclePolicyResponse = ( } as any; }; +const deserializeAws_json1_1DeletePullThroughCacheRuleResponse = ( + output: any, + context: __SerdeContext +): DeletePullThroughCacheRuleResponse => { + return { + createdAt: + output.createdAt !== undefined && output.createdAt !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdAt))) + : undefined, + ecrRepositoryPrefix: __expectString(output.ecrRepositoryPrefix), + registryId: __expectString(output.registryId), + upstreamRegistryUrl: __expectString(output.upstreamRegistryUrl), + } as any; +}; + const deserializeAws_json1_1DeleteRegistryPolicyResponse = ( output: any, context: __SerdeContext @@ -4803,6 +5737,19 @@ const deserializeAws_json1_1DescribeImagesResponse = (output: any, context: __Se } as any; }; +const deserializeAws_json1_1DescribePullThroughCacheRulesResponse = ( + output: any, + context: __SerdeContext +): DescribePullThroughCacheRulesResponse => { + return { + nextToken: __expectString(output.nextToken), + pullThroughCacheRules: + output.pullThroughCacheRules !== undefined && output.pullThroughCacheRules !== null + ? deserializeAws_json1_1PullThroughCacheRuleList(output.pullThroughCacheRules, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1DescribeRegistryResponse = ( output: any, context: __SerdeContext @@ -4845,6 +5792,64 @@ const deserializeAws_json1_1EncryptionConfiguration = ( } as any; }; +const deserializeAws_json1_1EnhancedImageScanFinding = ( + output: any, + context: __SerdeContext +): EnhancedImageScanFinding => { + return { + awsAccountId: __expectString(output.awsAccountId), + description: __expectString(output.description), + findingArn: __expectString(output.findingArn), + firstObservedAt: + output.firstObservedAt !== undefined && output.firstObservedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.firstObservedAt))) + : undefined, + lastObservedAt: + output.lastObservedAt !== undefined && output.lastObservedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastObservedAt))) + : undefined, + packageVulnerabilityDetails: + output.packageVulnerabilityDetails !== undefined && output.packageVulnerabilityDetails !== null + ? deserializeAws_json1_1PackageVulnerabilityDetails(output.packageVulnerabilityDetails, context) + : undefined, + remediation: + output.remediation !== undefined && output.remediation !== null + ? deserializeAws_json1_1Remediation(output.remediation, context) + : undefined, + resources: + output.resources !== undefined && output.resources !== null + ? deserializeAws_json1_1ResourceList(output.resources, context) + : undefined, + score: __limitedParseDouble(output.score), + scoreDetails: + output.scoreDetails !== undefined && output.scoreDetails !== null + ? deserializeAws_json1_1ScoreDetails(output.scoreDetails, context) + : undefined, + severity: __expectString(output.severity), + status: __expectString(output.status), + title: __expectString(output.title), + type: __expectString(output.type), + updatedAt: + output.updatedAt !== undefined && output.updatedAt !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.updatedAt))) + : undefined, + } as any; +}; + +const deserializeAws_json1_1EnhancedImageScanFindingList = ( + output: any, + context: __SerdeContext +): EnhancedImageScanFinding[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1EnhancedImageScanFinding(entry, context); + }); +}; + const deserializeAws_json1_1FindingSeverityCounts = ( output: any, context: __SerdeContext @@ -4931,6 +5936,19 @@ const deserializeAws_json1_1GetRegistryPolicyResponse = ( } as any; }; +const deserializeAws_json1_1GetRegistryScanningConfigurationResponse = ( + output: any, + context: __SerdeContext +): GetRegistryScanningConfigurationResponse => { + return { + registryId: __expectString(output.registryId), + scanningConfiguration: + output.scanningConfiguration !== undefined && output.scanningConfiguration !== null + ? deserializeAws_json1_1RegistryScanningConfiguration(output.scanningConfiguration, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1GetRepositoryPolicyResponse = ( output: any, context: __SerdeContext @@ -5117,6 +6135,10 @@ const deserializeAws_json1_1ImageScanFindingList = (output: any, context: __Serd const deserializeAws_json1_1ImageScanFindings = (output: any, context: __SerdeContext): ImageScanFindings => { return { + enhancedFindings: + output.enhancedFindings !== undefined && output.enhancedFindings !== null + ? deserializeAws_json1_1EnhancedImageScanFindingList(output.enhancedFindings, context) + : undefined, findingSeverityCounts: output.findingSeverityCounts !== undefined && output.findingSeverityCounts !== null ? deserializeAws_json1_1FindingSeverityCounts(output.findingSeverityCounts, context) @@ -5192,6 +6214,17 @@ const deserializeAws_json1_1ImageTagList = (output: any, context: __SerdeContext }); }; +const deserializeAws_json1_1ImageTagsList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + const deserializeAws_json1_1InitiateLayerUploadResponse = ( output: any, context: __SerdeContext @@ -5430,6 +6463,86 @@ const deserializeAws_json1_1ListTagsForResourceResponse = ( } as any; }; +const deserializeAws_json1_1PackageVulnerabilityDetails = ( + output: any, + context: __SerdeContext +): PackageVulnerabilityDetails => { + return { + cvss: + output.cvss !== undefined && output.cvss !== null + ? deserializeAws_json1_1CvssScoreList(output.cvss, context) + : undefined, + referenceUrls: + output.referenceUrls !== undefined && output.referenceUrls !== null + ? deserializeAws_json1_1ReferenceUrlsList(output.referenceUrls, context) + : undefined, + relatedVulnerabilities: + output.relatedVulnerabilities !== undefined && output.relatedVulnerabilities !== null + ? deserializeAws_json1_1RelatedVulnerabilitiesList(output.relatedVulnerabilities, context) + : undefined, + source: __expectString(output.source), + sourceUrl: __expectString(output.sourceUrl), + vendorCreatedAt: + output.vendorCreatedAt !== undefined && output.vendorCreatedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.vendorCreatedAt))) + : undefined, + vendorSeverity: __expectString(output.vendorSeverity), + vendorUpdatedAt: + output.vendorUpdatedAt !== undefined && output.vendorUpdatedAt !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.vendorUpdatedAt))) + : undefined, + vulnerabilityId: __expectString(output.vulnerabilityId), + vulnerablePackages: + output.vulnerablePackages !== undefined && output.vulnerablePackages !== null + ? deserializeAws_json1_1VulnerablePackagesList(output.vulnerablePackages, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1PullThroughCacheRule = (output: any, context: __SerdeContext): PullThroughCacheRule => { + return { + createdAt: + output.createdAt !== undefined && output.createdAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdAt))) + : undefined, + ecrRepositoryPrefix: __expectString(output.ecrRepositoryPrefix), + registryId: __expectString(output.registryId), + upstreamRegistryUrl: __expectString(output.upstreamRegistryUrl), + } as any; +}; + +const deserializeAws_json1_1PullThroughCacheRuleAlreadyExistsException = ( + output: any, + context: __SerdeContext +): PullThroughCacheRuleAlreadyExistsException => { + return { + message: __expectString(output.message), + } as any; +}; + +const deserializeAws_json1_1PullThroughCacheRuleList = ( + output: any, + context: __SerdeContext +): PullThroughCacheRule[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1PullThroughCacheRule(entry, context); + }); +}; + +const deserializeAws_json1_1PullThroughCacheRuleNotFoundException = ( + output: any, + context: __SerdeContext +): PullThroughCacheRuleNotFoundException => { + return { + message: __expectString(output.message), + } as any; +}; + const deserializeAws_json1_1PutImageResponse = (output: any, context: __SerdeContext): PutImageResponse => { return { image: @@ -5485,6 +6598,18 @@ const deserializeAws_json1_1PutRegistryPolicyResponse = ( } as any; }; +const deserializeAws_json1_1PutRegistryScanningConfigurationResponse = ( + output: any, + context: __SerdeContext +): PutRegistryScanningConfigurationResponse => { + return { + registryScanningConfiguration: + output.registryScanningConfiguration !== undefined && output.registryScanningConfiguration !== null + ? 
deserializeAws_json1_1RegistryScanningConfiguration(output.registryScanningConfiguration, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1PutReplicationConfigurationResponse = ( output: any, context: __SerdeContext @@ -5497,6 +6622,13 @@ const deserializeAws_json1_1PutReplicationConfigurationResponse = ( } as any; }; +const deserializeAws_json1_1Recommendation = (output: any, context: __SerdeContext): Recommendation => { + return { + text: __expectString(output.text), + url: __expectString(output.url), + } as any; +}; + const deserializeAws_json1_1ReferencedImagesNotFoundException = ( output: any, context: __SerdeContext @@ -5506,6 +6638,17 @@ const deserializeAws_json1_1ReferencedImagesNotFoundException = ( } as any; }; +const deserializeAws_json1_1ReferenceUrlsList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + const deserializeAws_json1_1RegistryPolicyNotFoundException = ( output: any, context: __SerdeContext @@ -5515,6 +6658,63 @@ const deserializeAws_json1_1RegistryPolicyNotFoundException = ( } as any; }; +const deserializeAws_json1_1RegistryScanningConfiguration = ( + output: any, + context: __SerdeContext +): RegistryScanningConfiguration => { + return { + rules: + output.rules !== undefined && output.rules !== null + ? deserializeAws_json1_1RegistryScanningRuleList(output.rules, context) + : undefined, + scanType: __expectString(output.scanType), + } as any; +}; + +const deserializeAws_json1_1RegistryScanningRule = (output: any, context: __SerdeContext): RegistryScanningRule => { + return { + repositoryFilters: + output.repositoryFilters !== undefined && output.repositoryFilters !== null + ? deserializeAws_json1_1ScanningRepositoryFilterList(output.repositoryFilters, context) + : undefined, + scanFrequency: __expectString(output.scanFrequency), + } as any; +}; + +const deserializeAws_json1_1RegistryScanningRuleList = ( + output: any, + context: __SerdeContext +): RegistryScanningRule[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1RegistryScanningRule(entry, context); + }); +}; + +const deserializeAws_json1_1RelatedVulnerabilitiesList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_json1_1Remediation = (output: any, context: __SerdeContext): Remediation => { + return { + recommendation: + output.recommendation !== undefined && output.recommendation !== null + ? deserializeAws_json1_1Recommendation(output.recommendation, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1ReplicationConfiguration = ( output: any, context: __SerdeContext @@ -5659,12 +6859,133 @@ const deserializeAws_json1_1RepositoryPolicyNotFoundException = ( } as any; }; +const deserializeAws_json1_1RepositoryScanningConfiguration = ( + output: any, + context: __SerdeContext +): RepositoryScanningConfiguration => { + return { + appliedScanFilters: + output.appliedScanFilters !== undefined && output.appliedScanFilters !== null + ? 
deserializeAws_json1_1ScanningRepositoryFilterList(output.appliedScanFilters, context) + : undefined, + repositoryArn: __expectString(output.repositoryArn), + repositoryName: __expectString(output.repositoryName), + scanFrequency: __expectString(output.scanFrequency), + scanOnPush: __expectBoolean(output.scanOnPush), + } as any; +}; + +const deserializeAws_json1_1RepositoryScanningConfigurationFailure = ( + output: any, + context: __SerdeContext +): RepositoryScanningConfigurationFailure => { + return { + failureCode: __expectString(output.failureCode), + failureReason: __expectString(output.failureReason), + repositoryName: __expectString(output.repositoryName), + } as any; +}; + +const deserializeAws_json1_1RepositoryScanningConfigurationFailureList = ( + output: any, + context: __SerdeContext +): RepositoryScanningConfigurationFailure[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1RepositoryScanningConfigurationFailure(entry, context); + }); +}; + +const deserializeAws_json1_1RepositoryScanningConfigurationList = ( + output: any, + context: __SerdeContext +): RepositoryScanningConfiguration[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1RepositoryScanningConfiguration(entry, context); + }); +}; + +const deserializeAws_json1_1Resource = (output: any, context: __SerdeContext): Resource => { + return { + details: + output.details !== undefined && output.details !== null + ? deserializeAws_json1_1ResourceDetails(output.details, context) + : undefined, + id: __expectString(output.id), + tags: + output.tags !== undefined && output.tags !== null ? deserializeAws_json1_1Tags(output.tags, context) : undefined, + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_json1_1ResourceDetails = (output: any, context: __SerdeContext): ResourceDetails => { + return { + awsEcrContainerImage: + output.awsEcrContainerImage !== undefined && output.awsEcrContainerImage !== null + ? deserializeAws_json1_1AwsEcrContainerImageDetails(output.awsEcrContainerImage, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1ResourceList = (output: any, context: __SerdeContext): Resource[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1Resource(entry, context); + }); +}; + +const deserializeAws_json1_1ScanningRepositoryFilter = ( + output: any, + context: __SerdeContext +): ScanningRepositoryFilter => { + return { + filter: __expectString(output.filter), + filterType: __expectString(output.filterType), + } as any; +}; + +const deserializeAws_json1_1ScanningRepositoryFilterList = ( + output: any, + context: __SerdeContext +): ScanningRepositoryFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1ScanningRepositoryFilter(entry, context); + }); +}; + const deserializeAws_json1_1ScanNotFoundException = (output: any, context: __SerdeContext): ScanNotFoundException => { return { message: __expectString(output.message), } as any; }; +const deserializeAws_json1_1ScoreDetails = (output: any, context: __SerdeContext): ScoreDetails => { + return { + cvss: + output.cvss !== undefined && output.cvss !== null + ? 
deserializeAws_json1_1CvssScoreDetails(output.cvss, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1ServerException = (output: any, context: __SerdeContext): ServerException => { return { message: __expectString(output.message), @@ -5731,6 +7052,18 @@ const deserializeAws_json1_1TagResourceResponse = (output: any, context: __Serde return {} as any; }; +const deserializeAws_json1_1Tags = (output: any, context: __SerdeContext): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + const deserializeAws_json1_1TooManyTagsException = (output: any, context: __SerdeContext): TooManyTagsException => { return { message: __expectString(output.message), @@ -5746,6 +7079,15 @@ const deserializeAws_json1_1UnsupportedImageTypeException = ( } as any; }; +const deserializeAws_json1_1UnsupportedUpstreamRegistryException = ( + output: any, + context: __SerdeContext +): UnsupportedUpstreamRegistryException => { + return { + message: __expectString(output.message), + } as any; +}; + const deserializeAws_json1_1UntagResourceResponse = (output: any, context: __SerdeContext): UntagResourceResponse => { return {} as any; }; @@ -5777,6 +7119,30 @@ const deserializeAws_json1_1ValidationException = (output: any, context: __Serde } as any; }; +const deserializeAws_json1_1VulnerablePackage = (output: any, context: __SerdeContext): VulnerablePackage => { + return { + arch: __expectString(output.arch), + epoch: __expectInt32(output.epoch), + filePath: __expectString(output.filePath), + name: __expectString(output.name), + packageManager: __expectString(output.packageManager), + release: __expectString(output.release), + sourceLayerHash: __expectString(output.sourceLayerHash), + version: __expectString(output.version), + } as any; +}; + +const deserializeAws_json1_1VulnerablePackagesList = (output: any, context: __SerdeContext): VulnerablePackage[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1VulnerablePackage(entry, context); + }); +}; + const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ httpStatusCode: output.statusCode, requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], diff --git a/clients/client-evidently/.gitignore b/clients/client-evidently/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-evidently/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-evidently/LICENSE b/clients/client-evidently/LICENSE new file mode 100644 index 000000000000..f9e0c8672bca --- /dev/null +++ b/clients/client-evidently/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/clients/client-evidently/README.md b/clients/client-evidently/README.md new file mode 100644 index 000000000000..5154af16d94d --- /dev/null +++ b/clients/client-evidently/README.md @@ -0,0 +1,211 @@ +# @aws-sdk/client-evidently + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-evidently/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-evidently) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-evidently.svg)](https://www.npmjs.com/package/@aws-sdk/client-evidently) + +## Description + +AWS SDK for JavaScript Evidently Client for Node.js, Browser and React Native. + +

+You can use Amazon CloudWatch Evidently to safely validate new features by serving them to a specified percentage
+of your users while you roll out the feature. You can monitor the performance of the new feature
+to help you decide when to ramp up traffic to your users. This helps you
+reduce risk and identify unintended consequences before you fully launch the feature.
+
+You can also conduct A/B experiments to make feature design decisions based on evidence
+and data. An experiment can test as many as five variations at once. Evidently collects
+experiment data and analyzes it using statistical methods. It also provides clear
+recommendations about which variations perform better. You can test both user-facing features
+and backend features.
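+
+For example, here is a minimal sketch of evaluating a feature for one user session with
+`EvaluateFeatureCommand` (the project name, feature name, and entity ID are placeholders,
+not values defined by this package):
+
+```js
+import { EvidentlyClient, EvaluateFeatureCommand } from "@aws-sdk/client-evidently";
+
+const client = new EvidentlyClient({ region: "us-east-1" });
+const command = new EvaluateFeatureCommand({
+  project: "my-project", // placeholder project name
+  feature: "new-search", // placeholder feature name
+  entityId: "user-123", // placeholder ID that represents the user session
+});
+const response = await client.send(command); // the response describes the variation served to this user
+```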

+
+## Installing
+
+To install this package, simply type add or install @aws-sdk/client-evidently
+using your favorite package manager:
+
+- `npm install @aws-sdk/client-evidently`
+- `yarn add @aws-sdk/client-evidently`
+- `pnpm add @aws-sdk/client-evidently`
+
+## Getting Started
+
+### Import
+
+The AWS SDK is modularized by clients and commands.
+To send a request, you only need to import the `EvidentlyClient` and
+the commands you need, for example `BatchEvaluateFeatureCommand`:
+
+```js
+// ES5 example
+const { EvidentlyClient, BatchEvaluateFeatureCommand } = require("@aws-sdk/client-evidently");
+```
+
+```ts
+// ES6+ example
+import { EvidentlyClient, BatchEvaluateFeatureCommand } from "@aws-sdk/client-evidently";
+```
+
+### Usage
+
+To send a request, you:
+
+- Initiate client with configuration (e.g. credentials, region).
+- Initiate command with input parameters.
+- Call `send` operation on client with command object as input.
+- If you are using a custom http handler, you may call `destroy()` to close open connections.
+
+```js
+// a client can be shared by different commands.
+const client = new EvidentlyClient({ region: "REGION" });
+
+const params = {
+  /** input parameters */
+};
+const command = new BatchEvaluateFeatureCommand(params);
+```
+
+#### Async/await
+
+We recommend using the [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await)
+operator to wait for the promise returned by the send operation as follows:
+
+```js
+// async/await.
+try {
+  const data = await client.send(command);
+  // process data.
+} catch (error) {
+  // error handling.
+} finally {
+  // finally.
+}
+```
+
+Async-await is clean, concise, intuitive, easy to debug, and has better error handling
+than Promise chains or callbacks.
+
+#### Promises
+
+You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining)
+to execute the send operation.
+
+```js
+client.send(command).then(
+  (data) => {
+    // process data.
+  },
+  (error) => {
+    // error handling.
+  }
+);
+```
+
+Promises can also be called using `.catch()` and `.finally()` as follows:
+
+```js
+client
+  .send(command)
+  .then((data) => {
+    // process data.
+  })
+  .catch((error) => {
+    // error handling.
+  })
+  .finally(() => {
+    // finally.
+  });
+```
+
+#### Callbacks
+
+We do not recommend using callbacks because of [callback hell](http://callbackhell.com/),
+but they are supported by the send operation.
+
+```js
+// callbacks.
+client.send(command, (err, data) => {
+  // process err and data.
+});
+```
+
+#### v2 compatible style
+
+The client can also send requests using the v2 compatible style.
+However, it results in a bigger bundle size and may be dropped in the next major version. More details are available in the blog post
+on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/).
+
+```ts
+import * as AWS from "@aws-sdk/client-evidently";
+const client = new AWS.Evidently({ region: "REGION" });
+
+// async/await.
+try {
+  const data = await client.batchEvaluateFeature(params);
+  // process data.
+} catch (error) {
+  // error handling.
+}
+
+// Promises.
+client
+  .batchEvaluateFeature(params)
+  .then((data) => {
+    // process data.
+  })
+  .catch((error) => {
+    // error handling.
+  });
+
+// callbacks.
+client.batchEvaluateFeature(params, (err, data) => {
+  // process err and data.
+});
+```
+
+### Troubleshooting
+
+When the service returns an exception, the error will include the exception information,
+as well as response metadata (e.g. request id).
+
+```js
+try {
+  const data = await client.send(command);
+  // process data.
+} catch (error) {
+  const { requestId, cfId, extendedRequestId } = error.$metadata;
+  console.log({ requestId, cfId, extendedRequestId });
+  /**
+   * The keys within exceptions are also parsed.
+   * You can access them by specifying exception names:
+   * if (error.name === 'SomeServiceException') {
+   *   const value = error.specialKeyInException;
+   * }
+   */
+}
+```
+
+## Getting Help
+
+Please use these community resources for getting help.
+We use GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them.
+
+- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html)
+  or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html).
+- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/)
+  on the AWS Developer Blog.
+- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`.
+- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3).
+- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose).
+
+To test your universal JavaScript code in Node.js, browser, and React Native environments,
+visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests).
+
+## Contributing
+
+This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-evidently` package is updated.
+To contribute to this client, you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients).
+
+## License
+
+This SDK is distributed under the
+[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0);
+see LICENSE for more information.
diff --git a/clients/client-evidently/jest.config.js b/clients/client-evidently/jest.config.js new file mode 100644 index 000000000000..02eed352c6a8 --- /dev/null +++ b/clients/client-evidently/jest.config.js @@ -0,0 +1,4 @@ +module.exports = { + preset: "ts-jest", + testMatch: ["**/*.spec.ts", "!**/*.browser.spec.ts", "!**/*.integ.spec.ts"], +}; diff --git a/clients/client-evidently/package.json b/clients/client-evidently/package.json new file mode 100644 index 000000000000..0e68ad957f26 --- /dev/null +++ b/clients/client-evidently/package.json @@ -0,0 +1,94 @@ +{ + "name": "@aws-sdk/client-evidently", + "description": "AWS SDK for JavaScript Evidently Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "yarn build:cjs && yarn build:es && yarn build:types", + "build:cjs": "tsc -p tsconfig.json", + "build:docs": "yarn clean:docs && typedoc ./", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "clean": "yarn clean:dist && yarn clean:docs", + "clean:dist": "rimraf ./dist-*", + "clean:docs": "rimraf ./docs", + "downlevel-dts": "downlevel-dts dist-types dist-types/ts3.4", + "test": "jest --coverage --passWithNoTests" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "3.43.0", + "@aws-sdk/config-resolver": "3.40.0", + "@aws-sdk/credential-provider-node": "3.41.0", + "@aws-sdk/fetch-http-handler": "3.40.0", + "@aws-sdk/hash-node": "3.40.0", + "@aws-sdk/invalid-dependency": "3.40.0", + "@aws-sdk/middleware-content-length": "3.40.0", + "@aws-sdk/middleware-host-header": "3.40.0", + "@aws-sdk/middleware-logger": "3.40.0", + "@aws-sdk/middleware-retry": "3.40.0", + "@aws-sdk/middleware-serde": "3.40.0", + "@aws-sdk/middleware-signing": "3.40.0", + "@aws-sdk/middleware-stack": "3.40.0", + "@aws-sdk/middleware-user-agent": "3.40.0", + "@aws-sdk/node-config-provider": "3.40.0", + "@aws-sdk/node-http-handler": "3.40.0", + "@aws-sdk/protocol-http": "3.40.0", + "@aws-sdk/smithy-client": "3.41.0", + "@aws-sdk/types": "3.40.0", + "@aws-sdk/url-parser": "3.40.0", + "@aws-sdk/util-base64-browser": "3.37.0", + "@aws-sdk/util-base64-node": "3.37.0", + "@aws-sdk/util-body-length-browser": "3.37.0", + "@aws-sdk/util-body-length-node": "3.37.0", + "@aws-sdk/util-user-agent-browser": "3.40.0", + "@aws-sdk/util-user-agent-node": "3.40.0", + "@aws-sdk/util-utf8-browser": "3.37.0", + "@aws-sdk/util-utf8-node": "3.37.0", + "tslib": "^2.3.0" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "3.38.0", + "@types/node": "^12.7.5", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-evidently", + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-sdk-js-v3.git", + 
"directory": "clients/client-evidently" + } +} diff --git a/clients/client-evidently/src/Evidently.ts b/clients/client-evidently/src/Evidently.ts new file mode 100644 index 000000000000..c820ef94e525 --- /dev/null +++ b/clients/client-evidently/src/Evidently.ts @@ -0,0 +1,1213 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { + BatchEvaluateFeatureCommand, + BatchEvaluateFeatureCommandInput, + BatchEvaluateFeatureCommandOutput, +} from "./commands/BatchEvaluateFeatureCommand"; +import { + CreateExperimentCommand, + CreateExperimentCommandInput, + CreateExperimentCommandOutput, +} from "./commands/CreateExperimentCommand"; +import { + CreateFeatureCommand, + CreateFeatureCommandInput, + CreateFeatureCommandOutput, +} from "./commands/CreateFeatureCommand"; +import { + CreateLaunchCommand, + CreateLaunchCommandInput, + CreateLaunchCommandOutput, +} from "./commands/CreateLaunchCommand"; +import { + CreateProjectCommand, + CreateProjectCommandInput, + CreateProjectCommandOutput, +} from "./commands/CreateProjectCommand"; +import { + DeleteExperimentCommand, + DeleteExperimentCommandInput, + DeleteExperimentCommandOutput, +} from "./commands/DeleteExperimentCommand"; +import { + DeleteFeatureCommand, + DeleteFeatureCommandInput, + DeleteFeatureCommandOutput, +} from "./commands/DeleteFeatureCommand"; +import { + DeleteLaunchCommand, + DeleteLaunchCommandInput, + DeleteLaunchCommandOutput, +} from "./commands/DeleteLaunchCommand"; +import { + DeleteProjectCommand, + DeleteProjectCommandInput, + DeleteProjectCommandOutput, +} from "./commands/DeleteProjectCommand"; +import { + EvaluateFeatureCommand, + EvaluateFeatureCommandInput, + EvaluateFeatureCommandOutput, +} from "./commands/EvaluateFeatureCommand"; +import { + GetExperimentCommand, + GetExperimentCommandInput, + GetExperimentCommandOutput, +} from "./commands/GetExperimentCommand"; +import { + GetExperimentResultsCommand, + GetExperimentResultsCommandInput, + GetExperimentResultsCommandOutput, +} from "./commands/GetExperimentResultsCommand"; +import { GetFeatureCommand, GetFeatureCommandInput, GetFeatureCommandOutput } from "./commands/GetFeatureCommand"; +import { GetLaunchCommand, GetLaunchCommandInput, GetLaunchCommandOutput } from "./commands/GetLaunchCommand"; +import { GetProjectCommand, GetProjectCommandInput, GetProjectCommandOutput } from "./commands/GetProjectCommand"; +import { + ListExperimentsCommand, + ListExperimentsCommandInput, + ListExperimentsCommandOutput, +} from "./commands/ListExperimentsCommand"; +import { + ListFeaturesCommand, + ListFeaturesCommandInput, + ListFeaturesCommandOutput, +} from "./commands/ListFeaturesCommand"; +import { + ListLaunchesCommand, + ListLaunchesCommandInput, + ListLaunchesCommandOutput, +} from "./commands/ListLaunchesCommand"; +import { + ListProjectsCommand, + ListProjectsCommandInput, + ListProjectsCommandOutput, +} from "./commands/ListProjectsCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + PutProjectEventsCommand, + PutProjectEventsCommandInput, + PutProjectEventsCommandOutput, +} from "./commands/PutProjectEventsCommand"; +import { + StartExperimentCommand, + StartExperimentCommandInput, + StartExperimentCommandOutput, +} from "./commands/StartExperimentCommand"; +import { StartLaunchCommand, StartLaunchCommandInput, StartLaunchCommandOutput } from "./commands/StartLaunchCommand"; +import { + 
StopExperimentCommand, + StopExperimentCommandInput, + StopExperimentCommandOutput, +} from "./commands/StopExperimentCommand"; +import { StopLaunchCommand, StopLaunchCommandInput, StopLaunchCommandOutput } from "./commands/StopLaunchCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { + UpdateExperimentCommand, + UpdateExperimentCommandInput, + UpdateExperimentCommandOutput, +} from "./commands/UpdateExperimentCommand"; +import { + UpdateFeatureCommand, + UpdateFeatureCommandInput, + UpdateFeatureCommandOutput, +} from "./commands/UpdateFeatureCommand"; +import { + UpdateLaunchCommand, + UpdateLaunchCommandInput, + UpdateLaunchCommandOutput, +} from "./commands/UpdateLaunchCommand"; +import { + UpdateProjectCommand, + UpdateProjectCommandInput, + UpdateProjectCommandOutput, +} from "./commands/UpdateProjectCommand"; +import { + UpdateProjectDataDeliveryCommand, + UpdateProjectDataDeliveryCommandInput, + UpdateProjectDataDeliveryCommandOutput, +} from "./commands/UpdateProjectDataDeliveryCommand"; +import { EvidentlyClient } from "./EvidentlyClient"; + +/** + *

+ * You can use Amazon CloudWatch Evidently to safely validate new features by serving them to a specified percentage
+ * of your users while you roll out the feature. You can monitor the performance of the new feature
+ * to help you decide when to ramp up traffic to your users. This helps you
+ * reduce risk and identify unintended consequences before you fully launch the feature.
+ *
+ * You can also conduct A/B experiments to make feature design decisions based on evidence
+ * and data. An experiment can test as many as five variations at once. Evidently collects
+ * experiment data and analyzes it using statistical methods. It also provides clear
+ * recommendations about which variations perform better. You can test both user-facing features
+ * and backend features.

                                  + */ +export class Evidently extends EvidentlyClient { + /** + *

+ * This operation assigns feature variations to user sessions. For each user session, you pass
+ * in an entityID that represents the user. Evidently then checks the evaluation
+ * rules and assigns the variation.
+ *
+ * The first rules that are evaluated are the override rules. If the user's
+ * entityID matches an override rule, the user is served the variation specified
+ * by that rule.
+ *
+ * Next, if there is a launch of the feature, the user might be assigned to a variation in
+ * the launch. The chance of this depends on the percentage of users that are allocated to that
+ * launch. If the user is enrolled in the launch, the variation they are served depends on the
+ * allocation of the various feature variations used for the launch.
+ *
+ * If the user is not assigned to a launch, and there is an ongoing experiment for this feature, the user might
+ * be assigned to a variation in the experiment. The chance of this
+ * depends on the percentage of users that are allocated to that experiment. If the user is enrolled in the experiment,
+ * the variation they are served depends on the allocation of the various feature variations used for the experiment.
+ *
+ * If the user is not assigned to a launch or experiment, they are served the default variation.
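+ *
+ * A minimal usage sketch, assuming an `EvidentlyClient` named `client`; the project name,
+ * feature name, and entity IDs are placeholders:
+ *
+ * ```js
+ * const command = new BatchEvaluateFeatureCommand({
+ *   project: "my-project", // placeholder project name
+ *   requests: [
+ *     { entityId: "user-1", feature: "new-search" }, // placeholder user sessions and feature
+ *     { entityId: "user-2", feature: "new-search" },
+ *   ],
+ * });
+ * const response = await client.send(command); // one evaluation result is returned per request
+ * ```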

                                  + */ + public batchEvaluateFeature( + args: BatchEvaluateFeatureCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public batchEvaluateFeature( + args: BatchEvaluateFeatureCommandInput, + cb: (err: any, data?: BatchEvaluateFeatureCommandOutput) => void + ): void; + public batchEvaluateFeature( + args: BatchEvaluateFeatureCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: BatchEvaluateFeatureCommandOutput) => void + ): void; + public batchEvaluateFeature( + args: BatchEvaluateFeatureCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: BatchEvaluateFeatureCommandOutput) => void), + cb?: (err: any, data?: BatchEvaluateFeatureCommandOutput) => void + ): Promise | void { + const command = new BatchEvaluateFeatureCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Creates an Evidently experiment. Before you create an experiment,
+ * you must create the feature to use for the experiment.
+ *
+ * An experiment helps you make feature design
+ * decisions based on evidence and data. An experiment can test as
+ * many as five variations at once. Evidently collects experiment data and analyzes it using statistical methods, and provides
+ * clear recommendations about which variations perform better.
+ *
+ * Don't use this operation to update an existing experiment. Instead, use
+ * UpdateExperiment.

                                  + */ + public createExperiment( + args: CreateExperimentCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createExperiment( + args: CreateExperimentCommandInput, + cb: (err: any, data?: CreateExperimentCommandOutput) => void + ): void; + public createExperiment( + args: CreateExperimentCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateExperimentCommandOutput) => void + ): void; + public createExperiment( + args: CreateExperimentCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateExperimentCommandOutput) => void), + cb?: (err: any, data?: CreateExperimentCommandOutput) => void + ): Promise | void { + const command = new CreateExperimentCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Creates an Evidently feature that you want to launch or test. You can define up to
+ * five variations of a feature, and use these variations in your launches and experiments. A feature must be created in
+ * a project. For information about creating a project, see CreateProject.
+ *
+ * Don't use this operation to update an existing feature. Instead, use
+ * UpdateFeature.
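+ *
+ * A minimal sketch of creating a feature with two variations, assuming an `EvidentlyClient`
+ * named `client`; the names and values shown are placeholders:
+ *
+ * ```js
+ * const command = new CreateFeatureCommand({
+ *   project: "my-project", // placeholder project name
+ *   name: "new-search", // placeholder feature name
+ *   variations: [
+ *     { name: "control", value: { boolValue: false } },
+ *     { name: "treatment", value: { boolValue: true } },
+ *   ],
+ *   defaultVariation: "control", // served when no override, launch, or experiment rule applies
+ * });
+ * await client.send(command);
+ * ```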

                                  + */ + public createFeature( + args: CreateFeatureCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createFeature( + args: CreateFeatureCommandInput, + cb: (err: any, data?: CreateFeatureCommandOutput) => void + ): void; + public createFeature( + args: CreateFeatureCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateFeatureCommandOutput) => void + ): void; + public createFeature( + args: CreateFeatureCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateFeatureCommandOutput) => void), + cb?: (err: any, data?: CreateFeatureCommandOutput) => void + ): Promise | void { + const command = new CreateFeatureCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Creates a launch of a given feature. Before you create a launch, you
+ * must create the feature to use for the launch.
+ *
+ * You can use a launch to safely validate new features by serving them to a specified
+ * percentage of your users while you roll out the feature. You can monitor the performance of
+ * the new feature to help you decide when to ramp up traffic to more users. This helps you
+ * reduce risk and identify unintended consequences before you fully launch the feature.
+ *
+ * Don't use this operation to update an existing launch. Instead, use
+ * UpdateLaunch.

                                  + */ + public createLaunch( + args: CreateLaunchCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createLaunch(args: CreateLaunchCommandInput, cb: (err: any, data?: CreateLaunchCommandOutput) => void): void; + public createLaunch( + args: CreateLaunchCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateLaunchCommandOutput) => void + ): void; + public createLaunch( + args: CreateLaunchCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateLaunchCommandOutput) => void), + cb?: (err: any, data?: CreateLaunchCommandOutput) => void + ): Promise | void { + const command = new CreateLaunchCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Creates a project, which is the logical object in Evidently that can contain features, launches, and
+ * experiments. Use projects to group similar features together.
+ *
+ * To update an existing project, use UpdateProject.

                                  + */ + public createProject( + args: CreateProjectCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createProject( + args: CreateProjectCommandInput, + cb: (err: any, data?: CreateProjectCommandOutput) => void + ): void; + public createProject( + args: CreateProjectCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateProjectCommandOutput) => void + ): void; + public createProject( + args: CreateProjectCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateProjectCommandOutput) => void), + cb?: (err: any, data?: CreateProjectCommandOutput) => void + ): Promise | void { + const command = new CreateProjectCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Deletes an Evidently experiment. The feature used for the experiment is not deleted.
+ *
+ * To stop an experiment without deleting it, use StopExperiment.

                                  + */ + public deleteExperiment( + args: DeleteExperimentCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteExperiment( + args: DeleteExperimentCommandInput, + cb: (err: any, data?: DeleteExperimentCommandOutput) => void + ): void; + public deleteExperiment( + args: DeleteExperimentCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteExperimentCommandOutput) => void + ): void; + public deleteExperiment( + args: DeleteExperimentCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteExperimentCommandOutput) => void), + cb?: (err: any, data?: DeleteExperimentCommandOutput) => void + ): Promise | void { + const command = new DeleteExperimentCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Deletes an Evidently feature.

                                  + */ + public deleteFeature( + args: DeleteFeatureCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteFeature( + args: DeleteFeatureCommandInput, + cb: (err: any, data?: DeleteFeatureCommandOutput) => void + ): void; + public deleteFeature( + args: DeleteFeatureCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteFeatureCommandOutput) => void + ): void; + public deleteFeature( + args: DeleteFeatureCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteFeatureCommandOutput) => void), + cb?: (err: any, data?: DeleteFeatureCommandOutput) => void + ): Promise | void { + const command = new DeleteFeatureCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Deletes an Evidently launch. The feature used for the launch is not deleted.
+ *
+ * To stop a launch without deleting it, use StopLaunch.

                                  + */ + public deleteLaunch( + args: DeleteLaunchCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteLaunch(args: DeleteLaunchCommandInput, cb: (err: any, data?: DeleteLaunchCommandOutput) => void): void; + public deleteLaunch( + args: DeleteLaunchCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteLaunchCommandOutput) => void + ): void; + public deleteLaunch( + args: DeleteLaunchCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteLaunchCommandOutput) => void), + cb?: (err: any, data?: DeleteLaunchCommandOutput) => void + ): Promise | void { + const command = new DeleteLaunchCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Deletes an Evidently project. Before you can delete a project, you must delete all the
+ * features that the project contains. To delete a feature, use DeleteFeature.

                                  + */ + public deleteProject( + args: DeleteProjectCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteProject( + args: DeleteProjectCommandInput, + cb: (err: any, data?: DeleteProjectCommandOutput) => void + ): void; + public deleteProject( + args: DeleteProjectCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteProjectCommandOutput) => void + ): void; + public deleteProject( + args: DeleteProjectCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteProjectCommandOutput) => void), + cb?: (err: any, data?: DeleteProjectCommandOutput) => void + ): Promise | void { + const command = new DeleteProjectCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * This operation assigns a feature variation to one given user session. You pass in an
+ * entityID that represents the user. Evidently then checks the evaluation rules
+ * and assigns the variation.
+ *
+ * The first rules that are evaluated are the override rules. If the user's
+ * entityID matches an override rule, the user is served the variation specified
+ * by that rule.
+ *
+ * Next, if there is a launch of the feature, the user might be assigned to a variation in
+ * the launch. The chance of this depends on the percentage of users that are allocated to that
+ * launch. If the user is enrolled in the launch, the variation they are served depends on the
+ * allocation of the various feature variations used for the launch.
+ *
+ * If the user is not assigned to a launch, and there is an ongoing experiment for this feature, the user might
+ * be assigned to a variation in the experiment. The chance of this
+ * depends on the percentage of users that are allocated to that experiment. If the user is enrolled in the experiment,
+ * the variation they are served depends on the allocation of the various feature variations used for the experiment.
+ *
+ * If the user is not assigned to a launch or experiment, they are served the default variation.
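+ *
+ * A minimal sketch, assuming an `EvidentlyClient` named `client`; the names and ID are
+ * placeholders, and the response identifies the variation that was served:
+ *
+ * ```js
+ * const command = new EvaluateFeatureCommand({
+ *   project: "my-project", // placeholder project name
+ *   feature: "new-search", // placeholder feature name
+ *   entityId: "user-123", // placeholder ID that represents the user session
+ * });
+ * const response = await client.send(command);
+ * ```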

                                  + */ + public evaluateFeature( + args: EvaluateFeatureCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public evaluateFeature( + args: EvaluateFeatureCommandInput, + cb: (err: any, data?: EvaluateFeatureCommandOutput) => void + ): void; + public evaluateFeature( + args: EvaluateFeatureCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: EvaluateFeatureCommandOutput) => void + ): void; + public evaluateFeature( + args: EvaluateFeatureCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: EvaluateFeatureCommandOutput) => void), + cb?: (err: any, data?: EvaluateFeatureCommandOutput) => void + ): Promise | void { + const command = new EvaluateFeatureCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Returns the details about one experiment. You must already know the
+ * experiment name. To retrieve a list of experiments in your account, use ListExperiments.

                                  + */ + public getExperiment( + args: GetExperimentCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getExperiment( + args: GetExperimentCommandInput, + cb: (err: any, data?: GetExperimentCommandOutput) => void + ): void; + public getExperiment( + args: GetExperimentCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetExperimentCommandOutput) => void + ): void; + public getExperiment( + args: GetExperimentCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetExperimentCommandOutput) => void), + cb?: (err: any, data?: GetExperimentCommandOutput) => void + ): Promise | void { + const command = new GetExperimentCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Retrieves the results of a running or completed experiment.

                                  + */ + public getExperimentResults( + args: GetExperimentResultsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getExperimentResults( + args: GetExperimentResultsCommandInput, + cb: (err: any, data?: GetExperimentResultsCommandOutput) => void + ): void; + public getExperimentResults( + args: GetExperimentResultsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetExperimentResultsCommandOutput) => void + ): void; + public getExperimentResults( + args: GetExperimentResultsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetExperimentResultsCommandOutput) => void), + cb?: (err: any, data?: GetExperimentResultsCommandOutput) => void + ): Promise | void { + const command = new GetExperimentResultsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Returns the details about one feature. You must already know the feature name. To
+ * retrieve a list of features in your account, use ListFeatures.

                                  + */ + public getFeature(args: GetFeatureCommandInput, options?: __HttpHandlerOptions): Promise; + public getFeature(args: GetFeatureCommandInput, cb: (err: any, data?: GetFeatureCommandOutput) => void): void; + public getFeature( + args: GetFeatureCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetFeatureCommandOutput) => void + ): void; + public getFeature( + args: GetFeatureCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetFeatureCommandOutput) => void), + cb?: (err: any, data?: GetFeatureCommandOutput) => void + ): Promise | void { + const command = new GetFeatureCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Returns the details about one launch. You must already know the
+ * launch name. To retrieve a list of launches in your account, use ListLaunches.

                                  + */ + public getLaunch(args: GetLaunchCommandInput, options?: __HttpHandlerOptions): Promise; + public getLaunch(args: GetLaunchCommandInput, cb: (err: any, data?: GetLaunchCommandOutput) => void): void; + public getLaunch( + args: GetLaunchCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetLaunchCommandOutput) => void + ): void; + public getLaunch( + args: GetLaunchCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetLaunchCommandOutput) => void), + cb?: (err: any, data?: GetLaunchCommandOutput) => void + ): Promise | void { + const command = new GetLaunchCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Returns the details about one project. You must already know the
+ * project name. To retrieve a list of projects in your account, use ListProjects.

                                  + */ + public getProject(args: GetProjectCommandInput, options?: __HttpHandlerOptions): Promise; + public getProject(args: GetProjectCommandInput, cb: (err: any, data?: GetProjectCommandOutput) => void): void; + public getProject( + args: GetProjectCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetProjectCommandOutput) => void + ): void; + public getProject( + args: GetProjectCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetProjectCommandOutput) => void), + cb?: (err: any, data?: GetProjectCommandOutput) => void + ): Promise | void { + const command = new GetProjectCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Returns configuration details about all the experiments in the specified project.

                                  + */ + public listExperiments( + args: ListExperimentsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listExperiments( + args: ListExperimentsCommandInput, + cb: (err: any, data?: ListExperimentsCommandOutput) => void + ): void; + public listExperiments( + args: ListExperimentsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListExperimentsCommandOutput) => void + ): void; + public listExperiments( + args: ListExperimentsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListExperimentsCommandOutput) => void), + cb?: (err: any, data?: ListExperimentsCommandOutput) => void + ): Promise | void { + const command = new ListExperimentsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Returns configuration details about all the features in the specified project.

                                  + */ + public listFeatures( + args: ListFeaturesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listFeatures(args: ListFeaturesCommandInput, cb: (err: any, data?: ListFeaturesCommandOutput) => void): void; + public listFeatures( + args: ListFeaturesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListFeaturesCommandOutput) => void + ): void; + public listFeatures( + args: ListFeaturesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListFeaturesCommandOutput) => void), + cb?: (err: any, data?: ListFeaturesCommandOutput) => void + ): Promise | void { + const command = new ListFeaturesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Returns configuration details about all the launches in the specified project.

                                  + */ + public listLaunches( + args: ListLaunchesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listLaunches(args: ListLaunchesCommandInput, cb: (err: any, data?: ListLaunchesCommandOutput) => void): void; + public listLaunches( + args: ListLaunchesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListLaunchesCommandOutput) => void + ): void; + public listLaunches( + args: ListLaunchesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListLaunchesCommandOutput) => void), + cb?: (err: any, data?: ListLaunchesCommandOutput) => void + ): Promise | void { + const command = new ListLaunchesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Returns configuration details about all the projects in the current Region in your
+ * account.

                                  + */ + public listProjects( + args: ListProjectsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listProjects(args: ListProjectsCommandInput, cb: (err: any, data?: ListProjectsCommandOutput) => void): void; + public listProjects( + args: ListProjectsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListProjectsCommandOutput) => void + ): void; + public listProjects( + args: ListProjectsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListProjectsCommandOutput) => void), + cb?: (err: any, data?: ListProjectsCommandOutput) => void + ): Promise | void { + const command = new ListProjectsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Displays the tags associated with an Evidently resource.

                                  + */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTagsForResourceCommandOutput) => void), + cb?: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): Promise | void { + const command = new ListTagsForResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Sends performance events to Evidently. These events can be used to evaluate a launch or
+ * an experiment.

                                  + */ + public putProjectEvents( + args: PutProjectEventsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public putProjectEvents( + args: PutProjectEventsCommandInput, + cb: (err: any, data?: PutProjectEventsCommandOutput) => void + ): void; + public putProjectEvents( + args: PutProjectEventsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: PutProjectEventsCommandOutput) => void + ): void; + public putProjectEvents( + args: PutProjectEventsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: PutProjectEventsCommandOutput) => void), + cb?: (err: any, data?: PutProjectEventsCommandOutput) => void + ): Promise | void { + const command = new PutProjectEventsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Starts an existing experiment. To create an experiment,
+ * use CreateExperiment.

                                  + */ + public startExperiment( + args: StartExperimentCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public startExperiment( + args: StartExperimentCommandInput, + cb: (err: any, data?: StartExperimentCommandOutput) => void + ): void; + public startExperiment( + args: StartExperimentCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StartExperimentCommandOutput) => void + ): void; + public startExperiment( + args: StartExperimentCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StartExperimentCommandOutput) => void), + cb?: (err: any, data?: StartExperimentCommandOutput) => void + ): Promise | void { + const command = new StartExperimentCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Starts an existing launch. To create a launch,
+ * use CreateLaunch.

                                  + */ + public startLaunch(args: StartLaunchCommandInput, options?: __HttpHandlerOptions): Promise; + public startLaunch(args: StartLaunchCommandInput, cb: (err: any, data?: StartLaunchCommandOutput) => void): void; + public startLaunch( + args: StartLaunchCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StartLaunchCommandOutput) => void + ): void; + public startLaunch( + args: StartLaunchCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StartLaunchCommandOutput) => void), + cb?: (err: any, data?: StartLaunchCommandOutput) => void + ): Promise | void { + const command = new StartLaunchCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Stops an experiment that is currently running. If you stop an experiment, you can't
+ * resume it or restart it.

                                  + */ + public stopExperiment( + args: StopExperimentCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public stopExperiment( + args: StopExperimentCommandInput, + cb: (err: any, data?: StopExperimentCommandOutput) => void + ): void; + public stopExperiment( + args: StopExperimentCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StopExperimentCommandOutput) => void + ): void; + public stopExperiment( + args: StopExperimentCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StopExperimentCommandOutput) => void), + cb?: (err: any, data?: StopExperimentCommandOutput) => void + ): Promise | void { + const command = new StopExperimentCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Stops a launch that is currently running. After you stop a launch, you will not be able to resume it or restart it.
+ * Also, it will not be evaluated as a rule for traffic allocation, and the traffic that was allocated to the launch
+ * will instead be available to the feature's experiment, if there is one. Otherwise, all traffic
+ * will be served the default variation after the launch is stopped.

                                  + */ + public stopLaunch(args: StopLaunchCommandInput, options?: __HttpHandlerOptions): Promise; + public stopLaunch(args: StopLaunchCommandInput, cb: (err: any, data?: StopLaunchCommandOutput) => void): void; + public stopLaunch( + args: StopLaunchCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StopLaunchCommandOutput) => void + ): void; + public stopLaunch( + args: StopLaunchCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StopLaunchCommandOutput) => void), + cb?: (err: any, data?: StopLaunchCommandOutput) => void + ): Promise | void { + const command = new StopLaunchCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+ * Assigns one or more tags (key-value pairs) to the specified CloudWatch Evidently resource. Projects,
+ * features, launches, and experiments can be tagged.
+ *
+ * Tags can help you organize and categorize your resources. You can also use them to scope user
+ * permissions by granting a user
+ * permission to access or change only resources with certain tag values.
+ *
+ * Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
+ *
+ * You can use the TagResource action with a resource that already has tags.
+ * If you specify a new tag key for the resource,
+ * this tag is appended to the list of tags associated
+ * with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces
+ * the previous value for that tag.
+ *
+ * You can associate as many as 50 tags with a resource.
+ *
+ * For more information, see Tagging Amazon Web Services resources.
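+ *
+ * A minimal sketch, assuming an `EvidentlyClient` named `client`; the resource ARN and tag
+ * values are placeholders:
+ *
+ * ```js
+ * const command = new TagResourceCommand({
+ *   resourceArn: "arn:aws:evidently:us-east-1:123456789012:project/my-project", // placeholder ARN
+ *   tags: { team: "search" }, // tag keys and values are plain strings
+ * });
+ * await client.send(command);
+ * ```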

                                  + */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise; + public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void; + public tagResource( + args: TagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TagResourceCommandOutput) => void + ): void; + public tagResource( + args: TagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TagResourceCommandOutput) => void), + cb?: (err: any, data?: TagResourceCommandOutput) => void + ): Promise | void { + const command = new TagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Removes one or more tags from the specified resource.

                                  + */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public untagResource( + args: UntagResourceCommandInput, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UntagResourceCommandOutput) => void), + cb?: (err: any, data?: UntagResourceCommandOutput) => void + ): Promise | void { + const command = new UntagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Updates an Evidently experiment.

                                  + *

Don't use this operation to update an experiment's tags. Instead, use TagResource.

                                  + */ + public updateExperiment( + args: UpdateExperimentCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateExperiment( + args: UpdateExperimentCommandInput, + cb: (err: any, data?: UpdateExperimentCommandOutput) => void + ): void; + public updateExperiment( + args: UpdateExperimentCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateExperimentCommandOutput) => void + ): void; + public updateExperiment( + args: UpdateExperimentCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateExperimentCommandOutput) => void), + cb?: (err: any, data?: UpdateExperimentCommandOutput) => void + ): Promise | void { + const command = new UpdateExperimentCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Updates an existing feature.

                                  + *

You can't use this operation to update the tags of an existing feature. Instead, use TagResource.

                                  + */ + public updateFeature( + args: UpdateFeatureCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateFeature( + args: UpdateFeatureCommandInput, + cb: (err: any, data?: UpdateFeatureCommandOutput) => void + ): void; + public updateFeature( + args: UpdateFeatureCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateFeatureCommandOutput) => void + ): void; + public updateFeature( + args: UpdateFeatureCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateFeatureCommandOutput) => void), + cb?: (err: any, data?: UpdateFeatureCommandOutput) => void + ): Promise | void { + const command = new UpdateFeatureCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Updates a launch of a given feature.

                                  + *

Don't use this operation to update the tags of an existing launch. Instead, use TagResource.

                                  + */ + public updateLaunch( + args: UpdateLaunchCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateLaunch(args: UpdateLaunchCommandInput, cb: (err: any, data?: UpdateLaunchCommandOutput) => void): void; + public updateLaunch( + args: UpdateLaunchCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateLaunchCommandOutput) => void + ): void; + public updateLaunch( + args: UpdateLaunchCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateLaunchCommandOutput) => void), + cb?: (err: any, data?: UpdateLaunchCommandOutput) => void + ): Promise | void { + const command = new UpdateLaunchCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Updates the description of an existing project.

                                  + *

                                  To create a new project, use CreateProject.

                                  + *

Don't use this operation to update the data storage options of a project. Instead, use UpdateProjectDataDelivery.

                                  + *

Don't use this operation to update the tags of a project. Instead, use TagResource.
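For illustration, a hedged sketch of an updateProject call that touches only the description, leaving data delivery and tags to their dedicated operations; the project and description field names are assumptions.

```ts
import { Evidently } from "@aws-sdk/client-evidently";

const evidently = new Evidently({ region: "us-east-1" });

// Update only the description; data storage options and tags are managed
// through UpdateProjectDataDelivery and TagResource instead.
await evidently.updateProject({
  project: "my-project", // assumed field name
  description: "Checkout experiments and launches",
});
```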

                                  + */ + public updateProject( + args: UpdateProjectCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateProject( + args: UpdateProjectCommandInput, + cb: (err: any, data?: UpdateProjectCommandOutput) => void + ): void; + public updateProject( + args: UpdateProjectCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateProjectCommandOutput) => void + ): void; + public updateProject( + args: UpdateProjectCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateProjectCommandOutput) => void), + cb?: (err: any, data?: UpdateProjectCommandOutput) => void + ): Promise | void { + const command = new UpdateProjectCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Updates the data storage options for this project. If you store evaluation events, you can keep them and analyze them on your own. If you choose not to store evaluation events, Evidently deletes them after using them to produce metrics and other experiment results that you can view.

                                  + *

                                  You can't specify both cloudWatchLogs and s3Destination in the same operation.
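A sketch of choosing a single storage destination for evaluation events, consistent with the constraint above; the s3Destination shape and its bucket/prefix fields are assumptions.

```ts
import { Evidently } from "@aws-sdk/client-evidently";

const evidently = new Evidently({ region: "us-east-1" });

// Configure S3 delivery only; cloudWatchLogs is deliberately omitted because
// both destinations cannot be set in the same call.
await evidently.updateProjectDataDelivery({
  project: "my-project", // assumed field name
  s3Destination: {       // assumed shape
    bucket: "my-evidently-events",
    prefix: "evaluation-events/",
  },
});
```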

                                  + */ + public updateProjectDataDelivery( + args: UpdateProjectDataDeliveryCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateProjectDataDelivery( + args: UpdateProjectDataDeliveryCommandInput, + cb: (err: any, data?: UpdateProjectDataDeliveryCommandOutput) => void + ): void; + public updateProjectDataDelivery( + args: UpdateProjectDataDeliveryCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateProjectDataDeliveryCommandOutput) => void + ): void; + public updateProjectDataDelivery( + args: UpdateProjectDataDeliveryCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateProjectDataDeliveryCommandOutput) => void), + cb?: (err: any, data?: UpdateProjectDataDeliveryCommandOutput) => void + ): Promise | void { + const command = new UpdateProjectDataDeliveryCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } +} diff --git a/clients/client-evidently/src/EvidentlyClient.ts b/clients/client-evidently/src/EvidentlyClient.ts new file mode 100644 index 000000000000..5322b7f4ba4a --- /dev/null +++ b/clients/client-evidently/src/EvidentlyClient.ts @@ -0,0 +1,362 @@ +import { + EndpointsInputConfig, + EndpointsResolvedConfig, + RegionInputConfig, + RegionResolvedConfig, + resolveEndpointsConfig, + resolveRegionConfig, +} from "@aws-sdk/config-resolver"; +import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length"; +import { + getHostHeaderPlugin, + HostHeaderInputConfig, + HostHeaderResolvedConfig, + resolveHostHeaderConfig, +} from "@aws-sdk/middleware-host-header"; +import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; +import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry"; +import { + AwsAuthInputConfig, + AwsAuthResolvedConfig, + getAwsAuthPlugin, + resolveAwsAuthConfig, +} from "@aws-sdk/middleware-signing"; +import { + getUserAgentPlugin, + resolveUserAgentConfig, + UserAgentInputConfig, + UserAgentResolvedConfig, +} from "@aws-sdk/middleware-user-agent"; +import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http"; +import { + Client as __Client, + SmithyConfiguration as __SmithyConfiguration, + SmithyResolvedConfiguration as __SmithyResolvedConfiguration, +} from "@aws-sdk/smithy-client"; +import { + Credentials as __Credentials, + Decoder as __Decoder, + Encoder as __Encoder, + Hash as __Hash, + HashConstructor as __HashConstructor, + HttpHandlerOptions as __HttpHandlerOptions, + Logger as __Logger, + Provider as __Provider, + Provider, + RegionInfoProvider, + StreamCollector as __StreamCollector, + UrlParser as __UrlParser, + UserAgent as __UserAgent, +} from "@aws-sdk/types"; + +import { + BatchEvaluateFeatureCommandInput, + BatchEvaluateFeatureCommandOutput, +} from "./commands/BatchEvaluateFeatureCommand"; +import { CreateExperimentCommandInput, CreateExperimentCommandOutput } from "./commands/CreateExperimentCommand"; +import { CreateFeatureCommandInput, CreateFeatureCommandOutput } from "./commands/CreateFeatureCommand"; +import { CreateLaunchCommandInput, CreateLaunchCommandOutput } from "./commands/CreateLaunchCommand"; +import { CreateProjectCommandInput, CreateProjectCommandOutput } from 
"./commands/CreateProjectCommand"; +import { DeleteExperimentCommandInput, DeleteExperimentCommandOutput } from "./commands/DeleteExperimentCommand"; +import { DeleteFeatureCommandInput, DeleteFeatureCommandOutput } from "./commands/DeleteFeatureCommand"; +import { DeleteLaunchCommandInput, DeleteLaunchCommandOutput } from "./commands/DeleteLaunchCommand"; +import { DeleteProjectCommandInput, DeleteProjectCommandOutput } from "./commands/DeleteProjectCommand"; +import { EvaluateFeatureCommandInput, EvaluateFeatureCommandOutput } from "./commands/EvaluateFeatureCommand"; +import { GetExperimentCommandInput, GetExperimentCommandOutput } from "./commands/GetExperimentCommand"; +import { + GetExperimentResultsCommandInput, + GetExperimentResultsCommandOutput, +} from "./commands/GetExperimentResultsCommand"; +import { GetFeatureCommandInput, GetFeatureCommandOutput } from "./commands/GetFeatureCommand"; +import { GetLaunchCommandInput, GetLaunchCommandOutput } from "./commands/GetLaunchCommand"; +import { GetProjectCommandInput, GetProjectCommandOutput } from "./commands/GetProjectCommand"; +import { ListExperimentsCommandInput, ListExperimentsCommandOutput } from "./commands/ListExperimentsCommand"; +import { ListFeaturesCommandInput, ListFeaturesCommandOutput } from "./commands/ListFeaturesCommand"; +import { ListLaunchesCommandInput, ListLaunchesCommandOutput } from "./commands/ListLaunchesCommand"; +import { ListProjectsCommandInput, ListProjectsCommandOutput } from "./commands/ListProjectsCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { PutProjectEventsCommandInput, PutProjectEventsCommandOutput } from "./commands/PutProjectEventsCommand"; +import { StartExperimentCommandInput, StartExperimentCommandOutput } from "./commands/StartExperimentCommand"; +import { StartLaunchCommandInput, StartLaunchCommandOutput } from "./commands/StartLaunchCommand"; +import { StopExperimentCommandInput, StopExperimentCommandOutput } from "./commands/StopExperimentCommand"; +import { StopLaunchCommandInput, StopLaunchCommandOutput } from "./commands/StopLaunchCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { UpdateExperimentCommandInput, UpdateExperimentCommandOutput } from "./commands/UpdateExperimentCommand"; +import { UpdateFeatureCommandInput, UpdateFeatureCommandOutput } from "./commands/UpdateFeatureCommand"; +import { UpdateLaunchCommandInput, UpdateLaunchCommandOutput } from "./commands/UpdateLaunchCommand"; +import { UpdateProjectCommandInput, UpdateProjectCommandOutput } from "./commands/UpdateProjectCommand"; +import { + UpdateProjectDataDeliveryCommandInput, + UpdateProjectDataDeliveryCommandOutput, +} from "./commands/UpdateProjectDataDeliveryCommand"; +import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; + +export type ServiceInputTypes = + | BatchEvaluateFeatureCommandInput + | CreateExperimentCommandInput + | CreateFeatureCommandInput + | CreateLaunchCommandInput + | CreateProjectCommandInput + | DeleteExperimentCommandInput + | DeleteFeatureCommandInput + | DeleteLaunchCommandInput + | DeleteProjectCommandInput + | EvaluateFeatureCommandInput + | GetExperimentCommandInput + | GetExperimentResultsCommandInput + | GetFeatureCommandInput + | GetLaunchCommandInput + | GetProjectCommandInput + | 
ListExperimentsCommandInput + | ListFeaturesCommandInput + | ListLaunchesCommandInput + | ListProjectsCommandInput + | ListTagsForResourceCommandInput + | PutProjectEventsCommandInput + | StartExperimentCommandInput + | StartLaunchCommandInput + | StopExperimentCommandInput + | StopLaunchCommandInput + | TagResourceCommandInput + | UntagResourceCommandInput + | UpdateExperimentCommandInput + | UpdateFeatureCommandInput + | UpdateLaunchCommandInput + | UpdateProjectCommandInput + | UpdateProjectDataDeliveryCommandInput; + +export type ServiceOutputTypes = + | BatchEvaluateFeatureCommandOutput + | CreateExperimentCommandOutput + | CreateFeatureCommandOutput + | CreateLaunchCommandOutput + | CreateProjectCommandOutput + | DeleteExperimentCommandOutput + | DeleteFeatureCommandOutput + | DeleteLaunchCommandOutput + | DeleteProjectCommandOutput + | EvaluateFeatureCommandOutput + | GetExperimentCommandOutput + | GetExperimentResultsCommandOutput + | GetFeatureCommandOutput + | GetLaunchCommandOutput + | GetProjectCommandOutput + | ListExperimentsCommandOutput + | ListFeaturesCommandOutput + | ListLaunchesCommandOutput + | ListProjectsCommandOutput + | ListTagsForResourceCommandOutput + | PutProjectEventsCommandOutput + | StartExperimentCommandOutput + | StartLaunchCommandOutput + | StopExperimentCommandOutput + | StopLaunchCommandOutput + | TagResourceCommandOutput + | UntagResourceCommandOutput + | UpdateExperimentCommandOutput + | UpdateFeatureCommandOutput + | UpdateLaunchCommandOutput + | UpdateProjectCommandOutput + | UpdateProjectDataDeliveryCommandOutput; + +export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { + /** + * The HTTP handler to use. Fetch in browser and Https in Nodejs. + */ + requestHandler?: __HttpHandler; + + /** + * A constructor for a class implementing the {@link __Hash} interface + * that computes the SHA-256 HMAC or checksum of a string or binary buffer. + * @internal + */ + sha256?: __HashConstructor; + + /** + * The function that will be used to convert strings into HTTP endpoints. + * @internal + */ + urlParser?: __UrlParser; + + /** + * A function that can calculate the length of a request body. + * @internal + */ + bodyLengthChecker?: (body: any) => number | undefined; + + /** + * A function that converts a stream into an array of bytes. + * @internal + */ + streamCollector?: __StreamCollector; + + /** + * The function that will be used to convert a base64-encoded string to a byte array. + * @internal + */ + base64Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a base64-encoded string. + * @internal + */ + base64Encoder?: __Encoder; + + /** + * The function that will be used to convert a UTF8-encoded string to a byte array. + * @internal + */ + utf8Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a UTF-8 encoded string. + * @internal + */ + utf8Encoder?: __Encoder; + + /** + * The runtime environment. + * @internal + */ + runtime?: string; + + /** + * Disable dyanamically changing the endpoint of the client based on the hostPrefix + * trait of an operation. + */ + disableHostPrefix?: boolean; + + /** + * Value for how many times a request will be made at most in case of retry. + */ + maxAttempts?: number | __Provider; + + /** + * Specifies which retry algorithm to use. + */ + retryMode?: string | __Provider; + + /** + * Optional logger for logging debug/info/warn/error. 
+ */ + logger?: __Logger; + + /** + * Enables IPv6/IPv4 dualstack endpoint. + */ + useDualstackEndpoint?: boolean | __Provider; + + /** + * Enables FIPS compatible endpoints. + */ + useFipsEndpoint?: boolean | __Provider; + + /** + * Unique service identifier. + * @internal + */ + serviceId?: string; + + /** + * The AWS region to which this client will send requests + */ + region?: string | __Provider; + + /** + * Default credentials provider; Not available in browser runtime. + * @internal + */ + credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; + + /** + * Fetch related hostname, signing name or signing region with given region. + * @internal + */ + regionInfoProvider?: RegionInfoProvider; + + /** + * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header + * @internal + */ + defaultUserAgentProvider?: Provider<__UserAgent>; +} + +type EvidentlyClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & + ClientDefaults & + RegionInputConfig & + EndpointsInputConfig & + RetryInputConfig & + HostHeaderInputConfig & + AwsAuthInputConfig & + UserAgentInputConfig; +/** + * The configuration interface of EvidentlyClient class constructor that set the region, credentials and other options. + */ +export interface EvidentlyClientConfig extends EvidentlyClientConfigType {} + +type EvidentlyClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & + Required & + RegionResolvedConfig & + EndpointsResolvedConfig & + RetryResolvedConfig & + HostHeaderResolvedConfig & + AwsAuthResolvedConfig & + UserAgentResolvedConfig; +/** + * The resolved configuration interface of EvidentlyClient class. This is resolved and normalized from the {@link EvidentlyClientConfig | constructor configuration interface}. + */ +export interface EvidentlyClientResolvedConfig extends EvidentlyClientResolvedConfigType {} + +/** + *

You can use Amazon CloudWatch Evidently to safely validate new features by serving them to a specified percentage of your users while you roll out the feature. You can monitor the performance of the new feature to help you decide when to ramp up traffic to your users. This helps you reduce risk and identify unintended consequences before you fully launch the feature.

                                  + *

You can also conduct A/B experiments to make feature design decisions based on evidence and data. An experiment can test as many as five variations at once. Evidently collects experiment data and analyzes it using statistical methods. It also provides clear recommendations about which variations perform better. You can test both user-facing features and backend features.
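The bare-bones pattern used by the generated command examples in this package looks roughly like the following; the region is a placeholder and ListProjectsCommand is one of the commands added in this diff.

```ts
import { EvidentlyClient, ListProjectsCommand } from "@aws-sdk/client-evidently";

// Construct the modular client once and reuse it for every command.
const client = new EvidentlyClient({ region: "us-east-1" });

const response = await client.send(new ListProjectsCommand({}));
console.log(response);

// In Node.js, shut down the client's sockets when it is no longer needed.
client.destroy();
```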

                                  + */ +export class EvidentlyClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + EvidentlyClientResolvedConfig +> { + /** + * The resolved configuration of EvidentlyClient class. This is resolved and normalized from the {@link EvidentlyClientConfig | constructor configuration interface}. + */ + readonly config: EvidentlyClientResolvedConfig; + + constructor(configuration: EvidentlyClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. + */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-evidently/src/commands/BatchEvaluateFeatureCommand.ts b/clients/client-evidently/src/commands/BatchEvaluateFeatureCommand.ts new file mode 100644 index 000000000000..e74dff87ed40 --- /dev/null +++ b/clients/client-evidently/src/commands/BatchEvaluateFeatureCommand.ts @@ -0,0 +1,109 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { BatchEvaluateFeatureRequest, BatchEvaluateFeatureResponse } from "../models/models_0"; +import { + deserializeAws_restJson1BatchEvaluateFeatureCommand, + serializeAws_restJson1BatchEvaluateFeatureCommand, +} from "../protocols/Aws_restJson1"; + +export interface BatchEvaluateFeatureCommandInput extends BatchEvaluateFeatureRequest {} +export interface BatchEvaluateFeatureCommandOutput extends BatchEvaluateFeatureResponse, __MetadataBearer {} + +/** + *

This operation assigns feature variations to user sessions. For each user session, you pass in an entityID that represents the user. Evidently then checks the evaluation rules and assigns the variation.

                                  + *

The first rules that are evaluated are the override rules. If the user's entityID matches an override rule, the user is served the variation specified by that rule.

                                  + *

Next, if there is a launch of the feature, the user might be assigned to a variation in the launch. The chance of this depends on the percentage of users that are allocated to that launch. If the user is enrolled in the launch, the variation they are served depends on the allocation of the various feature variations used for the launch.

                                  + *

If the user is not assigned to a launch, and there is an ongoing experiment for this feature, the user might be assigned to a variation in the experiment. The chance of this depends on the percentage of users that are allocated to that experiment. If the user is enrolled in the experiment, the variation they are served depends on the allocation of the various feature variations used for the experiment.

                                  + *

                                  If the user is not assigned to a launch or experiment, they are served the default variation.
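A hedged sketch of batch-evaluating one feature for several user sessions, reflecting the rule order above; the requests, entityId, and feature field names are assumptions about the request model.

```ts
import { EvidentlyClient, BatchEvaluateFeatureCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// Each request carries an entityId; override rules, launches, and experiments
// are checked in that order before the default variation is served.
const response = await client.send(
  new BatchEvaluateFeatureCommand({
    project: "my-project", // assumed field name
    requests: [            // assumed field name
      { entityId: "user-1", feature: "new-checkout" },
      { entityId: "user-2", feature: "new-checkout" },
    ],
  })
);
console.log(response);
```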

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, BatchEvaluateFeatureCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, BatchEvaluateFeatureCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new BatchEvaluateFeatureCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link BatchEvaluateFeatureCommandInput} for command's `input` shape. + * @see {@link BatchEvaluateFeatureCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class BatchEvaluateFeatureCommand extends $Command< + BatchEvaluateFeatureCommandInput, + BatchEvaluateFeatureCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: BatchEvaluateFeatureCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "BatchEvaluateFeatureCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: BatchEvaluateFeatureRequest.filterSensitiveLog, + outputFilterSensitiveLog: BatchEvaluateFeatureResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: BatchEvaluateFeatureCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1BatchEvaluateFeatureCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1BatchEvaluateFeatureCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/CreateExperimentCommand.ts b/clients/client-evidently/src/commands/CreateExperimentCommand.ts new file mode 100644 index 000000000000..f919d2a4c477 --- /dev/null +++ b/clients/client-evidently/src/commands/CreateExperimentCommand.ts @@ -0,0 +1,102 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { CreateExperimentRequest, CreateExperimentResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1CreateExperimentCommand, + serializeAws_restJson1CreateExperimentCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateExperimentCommandInput extends CreateExperimentRequest {} +export interface CreateExperimentCommandOutput extends CreateExperimentResponse, __MetadataBearer {} + +/** + *

Creates an Evidently experiment. Before you create an experiment, you must create the feature to use for the experiment.

                                  + *

An experiment helps you make feature design decisions based on evidence and data. An experiment can test as many as five variations at once. Evidently collects experiment data, analyzes it using statistical methods, and provides clear recommendations about which variations perform better.

                                  + *

Don't use this operation to update an existing experiment. Instead, use UpdateExperiment.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, CreateExperimentCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, CreateExperimentCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new CreateExperimentCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateExperimentCommandInput} for command's `input` shape. + * @see {@link CreateExperimentCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class CreateExperimentCommand extends $Command< + CreateExperimentCommandInput, + CreateExperimentCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateExperimentCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "CreateExperimentCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateExperimentRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateExperimentResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateExperimentCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateExperimentCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateExperimentCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/CreateFeatureCommand.ts b/clients/client-evidently/src/commands/CreateFeatureCommand.ts new file mode 100644 index 000000000000..2661258a058e --- /dev/null +++ b/clients/client-evidently/src/commands/CreateFeatureCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { CreateFeatureRequest, CreateFeatureResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateFeatureCommand, + 
serializeAws_restJson1CreateFeatureCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateFeatureCommandInput extends CreateFeatureRequest {} +export interface CreateFeatureCommandOutput extends CreateFeatureResponse, __MetadataBearer {} + +/** + *

Creates an Evidently feature that you want to launch or test. You can define up to five variations of a feature, and use these variations in your launches and experiments. A feature must be created in a project. For information about creating a project, see CreateProject.

                                  + *

Don't use this operation to update an existing feature. Instead, use UpdateFeature.
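A hedged sketch of creating a feature with two variations; the variations list, the value union, and defaultVariation are assumptions about the request model.

```ts
import { EvidentlyClient, CreateFeatureCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// Two variations of a boolean feature flag; up to five are allowed.
await client.send(
  new CreateFeatureCommand({
    project: "my-project", // assumed field name
    name: "new-checkout",
    variations: [          // assumed shape
      { name: "control", value: { boolValue: false } },
      { name: "treatment", value: { boolValue: true } },
    ],
    defaultVariation: "control", // assumed field name
  })
);
```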

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, CreateFeatureCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, CreateFeatureCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new CreateFeatureCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateFeatureCommandInput} for command's `input` shape. + * @see {@link CreateFeatureCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class CreateFeatureCommand extends $Command< + CreateFeatureCommandInput, + CreateFeatureCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateFeatureCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "CreateFeatureCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateFeatureRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateFeatureResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateFeatureCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateFeatureCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateFeatureCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/CreateLaunchCommand.ts b/clients/client-evidently/src/commands/CreateLaunchCommand.ts new file mode 100644 index 000000000000..dde8ebf69825 --- /dev/null +++ b/clients/client-evidently/src/commands/CreateLaunchCommand.ts @@ -0,0 +1,102 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { CreateLaunchRequest, CreateLaunchResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateLaunchCommand, + serializeAws_restJson1CreateLaunchCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface CreateLaunchCommandInput extends CreateLaunchRequest {} +export interface CreateLaunchCommandOutput extends CreateLaunchResponse, __MetadataBearer {} + +/** + *

Creates a launch of a given feature. Before you create a launch, you must create the feature to use for the launch.

                                  + *

You can use a launch to safely validate new features by serving them to a specified percentage of your users while you roll out the feature. You can monitor the performance of the new feature to help you decide when to ramp up traffic to more users. This helps you reduce risk and identify unintended consequences before you fully launch the feature.

                                  + *

Don't use this operation to update an existing launch. Instead, use UpdateLaunch.
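A hedged sketch of creating a launch that maps a launch group to an existing feature variation; the groups shape is an assumption, and traffic scheduling is omitted for brevity.

```ts
import { EvidentlyClient, CreateLaunchCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// One launch group serving the "treatment" variation of an existing feature.
await client.send(
  new CreateLaunchCommand({
    project: "my-project", // assumed field name
    name: "new-checkout-launch",
    groups: [              // assumed shape
      { name: "treatment-group", feature: "new-checkout", variation: "treatment" },
    ],
  })
);
```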

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, CreateLaunchCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, CreateLaunchCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new CreateLaunchCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateLaunchCommandInput} for command's `input` shape. + * @see {@link CreateLaunchCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class CreateLaunchCommand extends $Command< + CreateLaunchCommandInput, + CreateLaunchCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateLaunchCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "CreateLaunchCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateLaunchRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateLaunchResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateLaunchCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateLaunchCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateLaunchCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/CreateProjectCommand.ts b/clients/client-evidently/src/commands/CreateProjectCommand.ts new file mode 100644 index 000000000000..c8645c2db885 --- /dev/null +++ b/clients/client-evidently/src/commands/CreateProjectCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { CreateProjectRequest, CreateProjectResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateProjectCommand, + serializeAws_restJson1CreateProjectCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface CreateProjectCommandInput extends CreateProjectRequest {} +export interface CreateProjectCommandOutput extends CreateProjectResponse, __MetadataBearer {} + +/** + *

Creates a project, which is the logical object in Evidently that can contain features, launches, and experiments. Use projects to group similar features together.

                                  + *

                                  To update an existing project, use UpdateProject.
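A hedged sketch of creating a project; name and description are assumed request fields.

```ts
import { EvidentlyClient, CreateProjectCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// Create a project to group related features, launches, and experiments.
const response = await client.send(
  new CreateProjectCommand({
    name: "my-project",
    description: "Container for checkout features", // assumed field name
  })
);
console.log(response);
```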

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, CreateProjectCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, CreateProjectCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new CreateProjectCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateProjectCommandInput} for command's `input` shape. + * @see {@link CreateProjectCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class CreateProjectCommand extends $Command< + CreateProjectCommandInput, + CreateProjectCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateProjectCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "CreateProjectCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateProjectRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateProjectResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateProjectCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateProjectCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateProjectCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/DeleteExperimentCommand.ts b/clients/client-evidently/src/commands/DeleteExperimentCommand.ts new file mode 100644 index 000000000000..fdd5aa84f91f --- /dev/null +++ b/clients/client-evidently/src/commands/DeleteExperimentCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { DeleteExperimentRequest, DeleteExperimentResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteExperimentCommand, + 
serializeAws_restJson1DeleteExperimentCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteExperimentCommandInput extends DeleteExperimentRequest {} +export interface DeleteExperimentCommandOutput extends DeleteExperimentResponse, __MetadataBearer {} + +/** + *

                                  Deletes an Evidently experiment. The feature used for the experiment is not deleted.

                                  + *

                                  To stop an experiment without deleting it, use StopExperiment.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, DeleteExperimentCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, DeleteExperimentCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new DeleteExperimentCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteExperimentCommandInput} for command's `input` shape. + * @see {@link DeleteExperimentCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class DeleteExperimentCommand extends $Command< + DeleteExperimentCommandInput, + DeleteExperimentCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteExperimentCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "DeleteExperimentCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteExperimentRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteExperimentResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteExperimentCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteExperimentCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteExperimentCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/DeleteFeatureCommand.ts b/clients/client-evidently/src/commands/DeleteFeatureCommand.ts new file mode 100644 index 000000000000..4d2e9cfe096c --- /dev/null +++ b/clients/client-evidently/src/commands/DeleteFeatureCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { DeleteFeatureRequest, DeleteFeatureResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteFeatureCommand, + 
serializeAws_restJson1DeleteFeatureCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteFeatureCommandInput extends DeleteFeatureRequest {} +export interface DeleteFeatureCommandOutput extends DeleteFeatureResponse, __MetadataBearer {} + +/** + *

                                  Deletes an Evidently feature.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, DeleteFeatureCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, DeleteFeatureCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new DeleteFeatureCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteFeatureCommandInput} for command's `input` shape. + * @see {@link DeleteFeatureCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class DeleteFeatureCommand extends $Command< + DeleteFeatureCommandInput, + DeleteFeatureCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteFeatureCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "DeleteFeatureCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteFeatureRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteFeatureResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteFeatureCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteFeatureCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteFeatureCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/DeleteLaunchCommand.ts b/clients/client-evidently/src/commands/DeleteLaunchCommand.ts new file mode 100644 index 000000000000..283552254919 --- /dev/null +++ b/clients/client-evidently/src/commands/DeleteLaunchCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { DeleteLaunchRequest, DeleteLaunchResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteLaunchCommand, + serializeAws_restJson1DeleteLaunchCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface DeleteLaunchCommandInput extends DeleteLaunchRequest {} +export interface DeleteLaunchCommandOutput extends DeleteLaunchResponse, __MetadataBearer {} + +/** + *

                                  Deletes an Evidently launch. The feature used for the launch is not deleted.

                                  + *

                                  To stop a launch without deleting it, use StopLaunch.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, DeleteLaunchCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, DeleteLaunchCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new DeleteLaunchCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteLaunchCommandInput} for command's `input` shape. + * @see {@link DeleteLaunchCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class DeleteLaunchCommand extends $Command< + DeleteLaunchCommandInput, + DeleteLaunchCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteLaunchCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "DeleteLaunchCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteLaunchRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteLaunchResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteLaunchCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteLaunchCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteLaunchCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/DeleteProjectCommand.ts b/clients/client-evidently/src/commands/DeleteProjectCommand.ts new file mode 100644 index 000000000000..b1f74dfaaf81 --- /dev/null +++ b/clients/client-evidently/src/commands/DeleteProjectCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { DeleteProjectRequest, DeleteProjectResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteProjectCommand, + serializeAws_restJson1DeleteProjectCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface DeleteProjectCommandInput extends DeleteProjectRequest {} +export interface DeleteProjectCommandOutput extends DeleteProjectResponse, __MetadataBearer {} + +/** + *

Deletes an Evidently project. Before you can delete a project, you must delete all the
+ * features that the project contains. To delete a feature, use DeleteFeature.
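Because the project must be empty first, a reasonable cleanup flow lists and deletes its features before deleting the project. The sketch below assumes the ListFeatures response exposes a `features` array whose entries carry a `name`, and that the delete commands take `project` and `feature` fields; verify these against the generated models, and note that pagination of ListFeatures is ignored here.

```typescript
import {
  DeleteFeatureCommand,
  DeleteProjectCommand,
  EvidentlyClient,
  ListFeaturesCommand,
} from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// Delete every feature first, then the project itself. Field names
// (project, feature, features[].name) are assumptions about the generated
// request/response shapes, and ListFeatures pagination is not followed here.
async function deleteProjectAndFeatures(project: string): Promise<void> {
  const { features = [] } = await client.send(new ListFeaturesCommand({ project }));
  for (const summary of features) {
    if (summary.name) {
      await client.send(new DeleteFeatureCommand({ project, feature: summary.name }));
    }
  }
  await client.send(new DeleteProjectCommand({ project }));
}

deleteProjectAndFeatures("my-project").catch(console.error);
```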

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, DeleteProjectCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, DeleteProjectCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new DeleteProjectCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteProjectCommandInput} for command's `input` shape. + * @see {@link DeleteProjectCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class DeleteProjectCommand extends $Command< + DeleteProjectCommandInput, + DeleteProjectCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteProjectCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "DeleteProjectCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteProjectRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteProjectResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteProjectCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteProjectCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteProjectCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/EvaluateFeatureCommand.ts b/clients/client-evidently/src/commands/EvaluateFeatureCommand.ts new file mode 100644 index 000000000000..ef26f7e5af67 --- /dev/null +++ b/clients/client-evidently/src/commands/EvaluateFeatureCommand.ts @@ -0,0 +1,109 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { EvaluateFeatureRequest, EvaluateFeatureResponse } from "../models/models_0"; +import { + deserializeAws_restJson1EvaluateFeatureCommand, + 
serializeAws_restJson1EvaluateFeatureCommand, +} from "../protocols/Aws_restJson1"; + +export interface EvaluateFeatureCommandInput extends EvaluateFeatureRequest {} +export interface EvaluateFeatureCommandOutput extends EvaluateFeatureResponse, __MetadataBearer {} + +/** + *

This operation assigns a feature variation to one given user session. You pass in an
+ * entityID that represents the user. Evidently then checks the evaluation rules
+ * and assigns the variation.

                                  + *

The first rules that are evaluated are the override rules. If the user's
+ * entityID matches an override rule, the user is served the variation specified
+ * by that rule.

                                  + *

Next, if there is a launch of the feature, the user might be assigned to a variation in
+ * the launch. The chance of this depends on the percentage of users that are allocated to that
+ * launch. If the user is enrolled in the launch, the variation they are served depends on the
+ * allocation of the various feature variations used for the launch.

                                  + *

If the user is not assigned to a launch, and there is an ongoing experiment for this feature, the user might
+ * be assigned to a variation in the experiment. The chance of this
+ * depends on the percentage of users that are allocated to that experiment. If the user is enrolled in the experiment,
+ * the variation they are served depends on the allocation of the various feature variations used for the experiment.

                                  + *

If the user is not assigned to a launch or experiment, they are served the default variation.
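To make that flow concrete, here is a minimal evaluation sketch. It assumes the request takes `project`, `feature`, and `entityId` (the doc text above spells it entityID) and that the response carries the served `variation` and `value`; treat these names as assumptions to confirm against the EvaluateFeature request/response models.

```typescript
import { EvaluateFeatureCommand, EvidentlyClient } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// Ask Evidently which variation a given user session should see. Override
// rules are checked first, then any launch, then any experiment, then the
// default variation, as described above. The project, feature, and entityId
// request fields and the variation/value response fields are assumptions.
async function whichVariation(entityId: string) {
  const response = await client.send(
    new EvaluateFeatureCommand({
      project: "my-project",   // hypothetical project name
      feature: "new-checkout", // hypothetical feature name
      entityId,                // id that represents the user or session
    })
  );
  return { variation: response.variation, value: response.value };
}

whichVariation("user-1234").then(console.log).catch(console.error);
```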

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, EvaluateFeatureCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, EvaluateFeatureCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new EvaluateFeatureCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link EvaluateFeatureCommandInput} for command's `input` shape. + * @see {@link EvaluateFeatureCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class EvaluateFeatureCommand extends $Command< + EvaluateFeatureCommandInput, + EvaluateFeatureCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: EvaluateFeatureCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "EvaluateFeatureCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: EvaluateFeatureRequest.filterSensitiveLog, + outputFilterSensitiveLog: EvaluateFeatureResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: EvaluateFeatureCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1EvaluateFeatureCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1EvaluateFeatureCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/GetExperimentCommand.ts b/clients/client-evidently/src/commands/GetExperimentCommand.ts new file mode 100644 index 000000000000..42ee3792174c --- /dev/null +++ b/clients/client-evidently/src/commands/GetExperimentCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { GetExperimentRequest, GetExperimentResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetExperimentCommand, + 
serializeAws_restJson1GetExperimentCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetExperimentCommandInput extends GetExperimentRequest {} +export interface GetExperimentCommandOutput extends GetExperimentResponse, __MetadataBearer {} + +/** + *

Returns the details about one experiment. You must already know the
+ * experiment name. To retrieve a list of experiments in your account, use ListExperiments.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, GetExperimentCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, GetExperimentCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new GetExperimentCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetExperimentCommandInput} for command's `input` shape. + * @see {@link GetExperimentCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class GetExperimentCommand extends $Command< + GetExperimentCommandInput, + GetExperimentCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetExperimentCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "GetExperimentCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetExperimentRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetExperimentResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetExperimentCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetExperimentCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetExperimentCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/GetExperimentResultsCommand.ts b/clients/client-evidently/src/commands/GetExperimentResultsCommand.ts new file mode 100644 index 000000000000..2078acf7fdcb --- /dev/null +++ b/clients/client-evidently/src/commands/GetExperimentResultsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { GetExperimentResultsRequest, GetExperimentResultsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetExperimentResultsCommand, + 
serializeAws_restJson1GetExperimentResultsCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetExperimentResultsCommandInput extends GetExperimentResultsRequest {} +export interface GetExperimentResultsCommandOutput extends GetExperimentResultsResponse, __MetadataBearer {} + +/** + *

Retrieves the results of a running or completed experiment.
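A minimal retrieval sketch follows; every field name in it (`project`, `experiment`, `metricNames`, `treatmentNames`, and the `results` response field) is an assumption about the GetExperimentResults request/response shapes and should be verified against models_0.ts.

```typescript
import { EvidentlyClient, GetExperimentResultsCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// Fetch results for one metric across two treatments of an experiment.
// All field names here are assumptions about the generated shapes.
async function fetchResults() {
  const response = await client.send(
    new GetExperimentResultsCommand({
      project: "my-project",                      // hypothetical project name
      experiment: "checkout-experiment",          // hypothetical experiment name
      metricNames: ["conversionRate"],            // metrics of interest
      treatmentNames: ["control", "variation-a"], // treatments to compare
    })
  );
  return response.results ?? [];
}

fetchResults().then((r) => console.log(r)).catch(console.error);
```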

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, GetExperimentResultsCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, GetExperimentResultsCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new GetExperimentResultsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetExperimentResultsCommandInput} for command's `input` shape. + * @see {@link GetExperimentResultsCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class GetExperimentResultsCommand extends $Command< + GetExperimentResultsCommandInput, + GetExperimentResultsCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetExperimentResultsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "GetExperimentResultsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetExperimentResultsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetExperimentResultsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetExperimentResultsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetExperimentResultsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetExperimentResultsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/GetFeatureCommand.ts b/clients/client-evidently/src/commands/GetFeatureCommand.ts new file mode 100644 index 000000000000..57132100b6cf --- /dev/null +++ b/clients/client-evidently/src/commands/GetFeatureCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { GetFeatureRequest, GetFeatureResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1GetFeatureCommand, + serializeAws_restJson1GetFeatureCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetFeatureCommandInput extends GetFeatureRequest {} +export interface GetFeatureCommandOutput extends GetFeatureResponse, __MetadataBearer {} + +/** + *

Returns the details about one feature. You must already know the feature name. To
+ * retrieve a list of features in your account, use ListFeatures.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, GetFeatureCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, GetFeatureCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new GetFeatureCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetFeatureCommandInput} for command's `input` shape. + * @see {@link GetFeatureCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class GetFeatureCommand extends $Command< + GetFeatureCommandInput, + GetFeatureCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetFeatureCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "GetFeatureCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetFeatureRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetFeatureResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetFeatureCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetFeatureCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetFeatureCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/GetLaunchCommand.ts b/clients/client-evidently/src/commands/GetLaunchCommand.ts new file mode 100644 index 000000000000..8286a14173aa --- /dev/null +++ b/clients/client-evidently/src/commands/GetLaunchCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { GetLaunchRequest, GetLaunchResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetLaunchCommand, + serializeAws_restJson1GetLaunchCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetLaunchCommandInput 
extends GetLaunchRequest {} +export interface GetLaunchCommandOutput extends GetLaunchResponse, __MetadataBearer {} + +/** + *

Returns the details about one launch. You must already know the
+ * launch name. To retrieve a list of launches in your account, use ListLaunches.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, GetLaunchCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, GetLaunchCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new GetLaunchCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetLaunchCommandInput} for command's `input` shape. + * @see {@link GetLaunchCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class GetLaunchCommand extends $Command< + GetLaunchCommandInput, + GetLaunchCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetLaunchCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "GetLaunchCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetLaunchRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetLaunchResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetLaunchCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetLaunchCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetLaunchCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/GetProjectCommand.ts b/clients/client-evidently/src/commands/GetProjectCommand.ts new file mode 100644 index 000000000000..5f3420808d57 --- /dev/null +++ b/clients/client-evidently/src/commands/GetProjectCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { GetProjectRequest, GetProjectResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetProjectCommand, + serializeAws_restJson1GetProjectCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetProjectCommandInput extends 
GetProjectRequest {} +export interface GetProjectCommandOutput extends GetProjectResponse, __MetadataBearer {} + +/** + *

Returns the details about one project. You must already know the
+ * project name. To retrieve a list of projects in your account, use ListProjects.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, GetProjectCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, GetProjectCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new GetProjectCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetProjectCommandInput} for command's `input` shape. + * @see {@link GetProjectCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class GetProjectCommand extends $Command< + GetProjectCommandInput, + GetProjectCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetProjectCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "GetProjectCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetProjectRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetProjectResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetProjectCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetProjectCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetProjectCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/ListExperimentsCommand.ts b/clients/client-evidently/src/commands/ListExperimentsCommand.ts new file mode 100644 index 000000000000..0a93beb22d8d --- /dev/null +++ b/clients/client-evidently/src/commands/ListExperimentsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { ListExperimentsRequest, ListExperimentsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListExperimentsCommand, + serializeAws_restJson1ListExperimentsCommand, +} from "../protocols/Aws_restJson1"; + 
+export interface ListExperimentsCommandInput extends ListExperimentsRequest {} +export interface ListExperimentsCommandOutput extends ListExperimentsResponse, __MetadataBearer {} + +/** + *

Returns configuration details about all the experiments in the specified project.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, ListExperimentsCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, ListExperimentsCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new ListExperimentsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListExperimentsCommandInput} for command's `input` shape. + * @see {@link ListExperimentsCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class ListExperimentsCommand extends $Command< + ListExperimentsCommandInput, + ListExperimentsCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListExperimentsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "ListExperimentsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListExperimentsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListExperimentsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListExperimentsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListExperimentsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListExperimentsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/ListFeaturesCommand.ts b/clients/client-evidently/src/commands/ListFeaturesCommand.ts new file mode 100644 index 000000000000..b072592e3888 --- /dev/null +++ b/clients/client-evidently/src/commands/ListFeaturesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { ListFeaturesRequest, ListFeaturesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListFeaturesCommand, + 
serializeAws_restJson1ListFeaturesCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListFeaturesCommandInput extends ListFeaturesRequest {} +export interface ListFeaturesCommandOutput extends ListFeaturesResponse, __MetadataBearer {} + +/** + *

Returns configuration details about all the features in the specified project.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, ListFeaturesCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, ListFeaturesCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new ListFeaturesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListFeaturesCommandInput} for command's `input` shape. + * @see {@link ListFeaturesCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class ListFeaturesCommand extends $Command< + ListFeaturesCommandInput, + ListFeaturesCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListFeaturesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "ListFeaturesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListFeaturesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListFeaturesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListFeaturesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListFeaturesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListFeaturesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/ListLaunchesCommand.ts b/clients/client-evidently/src/commands/ListLaunchesCommand.ts new file mode 100644 index 000000000000..e7dee0fdb1db --- /dev/null +++ b/clients/client-evidently/src/commands/ListLaunchesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { ListLaunchesRequest, ListLaunchesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListLaunchesCommand, + serializeAws_restJson1ListLaunchesCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface ListLaunchesCommandInput extends ListLaunchesRequest {} +export interface ListLaunchesCommandOutput extends ListLaunchesResponse, __MetadataBearer {} + +/** + *

Returns configuration details about all the launches in the specified project.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, ListLaunchesCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, ListLaunchesCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new ListLaunchesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListLaunchesCommandInput} for command's `input` shape. + * @see {@link ListLaunchesCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class ListLaunchesCommand extends $Command< + ListLaunchesCommandInput, + ListLaunchesCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListLaunchesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "ListLaunchesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListLaunchesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListLaunchesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListLaunchesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListLaunchesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListLaunchesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/ListProjectsCommand.ts b/clients/client-evidently/src/commands/ListProjectsCommand.ts new file mode 100644 index 000000000000..c5197d31f585 --- /dev/null +++ b/clients/client-evidently/src/commands/ListProjectsCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { ListProjectsRequest, ListProjectsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListProjectsCommand, + serializeAws_restJson1ListProjectsCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface ListProjectsCommandInput extends ListProjectsRequest {} +export interface ListProjectsCommandOutput extends ListProjectsResponse, __MetadataBearer {} + +/** + *

Returns configuration details about all the projects in the current Region in your
+ * account.
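Listings are paginated, so a caller typically walks the pages. The sketch below assumes the usual `maxResults`/`nextToken` request fields and a `projects` array on the response; confirm these against the ListProjects shapes.

```typescript
import { EvidentlyClient, ListProjectsCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// Walk every page of project summaries in the current Region. The maxResults,
// nextToken, and projects field names are assumptions about the generated
// ListProjects request/response shapes.
async function listAllProjects(): Promise<unknown[]> {
  const projects: unknown[] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(new ListProjectsCommand({ maxResults: 50, nextToken }));
    projects.push(...(page.projects ?? []));
    nextToken = page.nextToken;
  } while (nextToken);
  return projects;
}

listAllProjects().then((all) => console.log(`found ${all.length} projects`)).catch(console.error);
```

Generated clients usually also ship paginate* helpers under src/pagination that wrap this loop for you.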

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, ListProjectsCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, ListProjectsCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new ListProjectsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListProjectsCommandInput} for command's `input` shape. + * @see {@link ListProjectsCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class ListProjectsCommand extends $Command< + ListProjectsCommandInput, + ListProjectsCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListProjectsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "ListProjectsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListProjectsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListProjectsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListProjectsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListProjectsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListProjectsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/ListTagsForResourceCommand.ts b/clients/client-evidently/src/commands/ListTagsForResourceCommand.ts new file mode 100644 index 000000000000..c48e2b2ff56e --- /dev/null +++ b/clients/client-evidently/src/commands/ListTagsForResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { ListTagsForResourceRequest, ListTagsForResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTagsForResourceCommand, + 
serializeAws_restJson1ListTagsForResourceCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListTagsForResourceCommandInput extends ListTagsForResourceRequest {} +export interface ListTagsForResourceCommandOutput extends ListTagsForResourceResponse, __MetadataBearer {} + +/** + *

Displays the tags associated with an Evidently resource.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, ListTagsForResourceCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, ListTagsForResourceCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "ListTagsForResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTagsForResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTagsForResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTagsForResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTagsForResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListTagsForResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/PutProjectEventsCommand.ts b/clients/client-evidently/src/commands/PutProjectEventsCommand.ts new file mode 100644 index 000000000000..475984803917 --- /dev/null +++ b/clients/client-evidently/src/commands/PutProjectEventsCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { PutProjectEventsRequest, PutProjectEventsResponse } from "../models/models_0"; +import { 
+ deserializeAws_restJson1PutProjectEventsCommand, + serializeAws_restJson1PutProjectEventsCommand, +} from "../protocols/Aws_restJson1"; + +export interface PutProjectEventsCommandInput extends PutProjectEventsRequest {} +export interface PutProjectEventsCommandOutput extends PutProjectEventsResponse, __MetadataBearer {} + +/** + *

Sends performance events to Evidently. These events can be used to evaluate a launch or
+ * an experiment.
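For instance, a custom event might be reported roughly as follows. The `events[]` entries with `timestamp`, `type`, and JSON-string `data` fields, the `"aws.evidently.custom"` type value, and the layout of the data payload are all assumptions about the PutProjectEvents request shape.

```typescript
import { EvidentlyClient, PutProjectEventsCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-east-1" });

// Record one custom event for a user session so it can feed launch or
// experiment metrics. Field names, the "aws.evidently.custom" type value, and
// the data payload layout are assumptions to verify against the models.
async function reportCheckoutLatency(entityId: string, latencyMs: number): Promise<void> {
  await client.send(
    new PutProjectEventsCommand({
      project: "my-project", // hypothetical project name
      events: [
        {
          timestamp: new Date(),
          type: "aws.evidently.custom",
          data: JSON.stringify({
            details: { checkoutLatencyMs: latencyMs },
            userDetails: { entityId },
          }),
        },
      ],
    })
  );
}

reportCheckoutLatency("user-1234", 312).catch(console.error);
```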

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, PutProjectEventsCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, PutProjectEventsCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new PutProjectEventsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link PutProjectEventsCommandInput} for command's `input` shape. + * @see {@link PutProjectEventsCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class PutProjectEventsCommand extends $Command< + PutProjectEventsCommandInput, + PutProjectEventsCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: PutProjectEventsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "PutProjectEventsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: PutProjectEventsRequest.filterSensitiveLog, + outputFilterSensitiveLog: PutProjectEventsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: PutProjectEventsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1PutProjectEventsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1PutProjectEventsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/StartExperimentCommand.ts b/clients/client-evidently/src/commands/StartExperimentCommand.ts new file mode 100644 index 000000000000..1368c945c58d --- /dev/null +++ b/clients/client-evidently/src/commands/StartExperimentCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { StartExperimentRequest, StartExperimentResponse } from "../models/models_0"; +import { + deserializeAws_restJson1StartExperimentCommand, 
+ serializeAws_restJson1StartExperimentCommand, +} from "../protocols/Aws_restJson1"; + +export interface StartExperimentCommandInput extends StartExperimentRequest {} +export interface StartExperimentCommandOutput extends StartExperimentResponse, __MetadataBearer {} + +/** + *

Starts an existing experiment. To create an experiment,
+ * use CreateExperiment.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, StartExperimentCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, StartExperimentCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new StartExperimentCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartExperimentCommandInput} for command's `input` shape. + * @see {@link StartExperimentCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class StartExperimentCommand extends $Command< + StartExperimentCommandInput, + StartExperimentCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartExperimentCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "StartExperimentCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StartExperimentRequest.filterSensitiveLog, + outputFilterSensitiveLog: StartExperimentResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StartExperimentCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1StartExperimentCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1StartExperimentCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/StartLaunchCommand.ts b/clients/client-evidently/src/commands/StartLaunchCommand.ts new file mode 100644 index 000000000000..891db90cdca3 --- /dev/null +++ b/clients/client-evidently/src/commands/StartLaunchCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { StartLaunchRequest, StartLaunchResponse } from "../models/models_0"; +import { + deserializeAws_restJson1StartLaunchCommand, + 
serializeAws_restJson1StartLaunchCommand, +} from "../protocols/Aws_restJson1"; + +export interface StartLaunchCommandInput extends StartLaunchRequest {} +export interface StartLaunchCommandOutput extends StartLaunchResponse, __MetadataBearer {} + +/** + *

Starts an existing launch. To create a launch,
+ * use CreateLaunch.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, StartLaunchCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, StartLaunchCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new StartLaunchCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartLaunchCommandInput} for command's `input` shape. + * @see {@link StartLaunchCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class StartLaunchCommand extends $Command< + StartLaunchCommandInput, + StartLaunchCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartLaunchCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "StartLaunchCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StartLaunchRequest.filterSensitiveLog, + outputFilterSensitiveLog: StartLaunchResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StartLaunchCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1StartLaunchCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1StartLaunchCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/StopExperimentCommand.ts b/clients/client-evidently/src/commands/StopExperimentCommand.ts new file mode 100644 index 000000000000..cebd743778ab --- /dev/null +++ b/clients/client-evidently/src/commands/StopExperimentCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { StopExperimentRequest, StopExperimentResponse } from "../models/models_0"; +import { + deserializeAws_restJson1StopExperimentCommand, + serializeAws_restJson1StopExperimentCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface StopExperimentCommandInput extends StopExperimentRequest {} +export interface StopExperimentCommandOutput extends StopExperimentResponse, __MetadataBearer {} + +/** + *

+ * Stops an experiment that is currently running. If you stop an experiment, you can't resume it or restart it.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, StopExperimentCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, StopExperimentCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new StopExperimentCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StopExperimentCommandInput} for command's `input` shape. + * @see {@link StopExperimentCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class StopExperimentCommand extends $Command< + StopExperimentCommandInput, + StopExperimentCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StopExperimentCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "StopExperimentCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StopExperimentRequest.filterSensitiveLog, + outputFilterSensitiveLog: StopExperimentResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StopExperimentCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1StopExperimentCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1StopExperimentCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/StopLaunchCommand.ts b/clients/client-evidently/src/commands/StopLaunchCommand.ts new file mode 100644 index 000000000000..5d68f4a3c6c6 --- /dev/null +++ b/clients/client-evidently/src/commands/StopLaunchCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { StopLaunchRequest, StopLaunchResponse } from "../models/models_0"; +import { + deserializeAws_restJson1StopLaunchCommand, + serializeAws_restJson1StopLaunchCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface StopLaunchCommandInput extends StopLaunchRequest {} +export interface StopLaunchCommandOutput extends StopLaunchResponse, __MetadataBearer {} + +/** + *

+ * Stops a launch that is currently running. After you stop a launch, you will not be able to resume it or restart it. Also, it will not be evaluated as a rule for traffic allocation, and the traffic that was allocated to the launch will instead be available to the feature's experiment, if there is one. Otherwise, all traffic will be served the default variation after the launch is stopped.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, StopLaunchCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, StopLaunchCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new StopLaunchCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StopLaunchCommandInput} for command's `input` shape. + * @see {@link StopLaunchCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class StopLaunchCommand extends $Command< + StopLaunchCommandInput, + StopLaunchCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StopLaunchCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "StopLaunchCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StopLaunchRequest.filterSensitiveLog, + outputFilterSensitiveLog: StopLaunchResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StopLaunchCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1StopLaunchCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1StopLaunchCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/TagResourceCommand.ts b/clients/client-evidently/src/commands/TagResourceCommand.ts new file mode 100644 index 000000000000..6234bf3f0f19 --- /dev/null +++ b/clients/client-evidently/src/commands/TagResourceCommand.ts @@ -0,0 +1,107 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { TagResourceRequest, TagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1TagResourceCommand, + serializeAws_restJson1TagResourceCommand, +} from "../protocols/Aws_restJson1"; + +export interface 
TagResourceCommandInput extends TagResourceRequest {} +export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} + +/** + *

+ * Assigns one or more tags (key-value pairs) to the specified CloudWatch Evidently resource. Projects, features, launches, and experiments can be tagged.
+ * Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.
+ * Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
+ * You can use the TagResource action with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.
+ * You can associate as many as 50 tags with a resource.
+ * For more information, see Tagging Amazon Web Services resources.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, TagResourceCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, TagResourceCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "TagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: TagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: TagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1TagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1TagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/UntagResourceCommand.ts b/clients/client-evidently/src/commands/UntagResourceCommand.ts new file mode 100644 index 000000000000..b98980191cdf --- /dev/null +++ b/clients/client-evidently/src/commands/UntagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { UntagResourceRequest, UntagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UntagResourceCommand, + serializeAws_restJson1UntagResourceCommand, +} from "../protocols/Aws_restJson1"; + 
+export interface UntagResourceCommandInput extends UntagResourceRequest {} +export interface UntagResourceCommandOutput extends UntagResourceResponse, __MetadataBearer {} + +/** + *

+ * Removes one or more tags from the specified resource.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, UntagResourceCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, UntagResourceCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "UntagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UntagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: UntagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UntagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UntagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UntagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/UpdateExperimentCommand.ts b/clients/client-evidently/src/commands/UpdateExperimentCommand.ts new file mode 100644 index 000000000000..549352c37417 --- /dev/null +++ b/clients/client-evidently/src/commands/UpdateExperimentCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { UpdateExperimentRequest, UpdateExperimentResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateExperimentCommand, + 
serializeAws_restJson1UpdateExperimentCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateExperimentCommandInput extends UpdateExperimentRequest {} +export interface UpdateExperimentCommandOutput extends UpdateExperimentResponse, __MetadataBearer {} + +/** + *

+ * Updates an Evidently experiment.
+ * Don't use this operation to update an experiment's tags. Instead, use TagResource.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, UpdateExperimentCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, UpdateExperimentCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new UpdateExperimentCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateExperimentCommandInput} for command's `input` shape. + * @see {@link UpdateExperimentCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class UpdateExperimentCommand extends $Command< + UpdateExperimentCommandInput, + UpdateExperimentCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateExperimentCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "UpdateExperimentCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateExperimentRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateExperimentResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateExperimentCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateExperimentCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateExperimentCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/UpdateFeatureCommand.ts b/clients/client-evidently/src/commands/UpdateFeatureCommand.ts new file mode 100644 index 000000000000..1344d1620b2c --- /dev/null +++ b/clients/client-evidently/src/commands/UpdateFeatureCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { UpdateFeatureRequest, UpdateFeatureResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateFeatureCommand, + 
serializeAws_restJson1UpdateFeatureCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateFeatureCommandInput extends UpdateFeatureRequest {} +export interface UpdateFeatureCommandOutput extends UpdateFeatureResponse, __MetadataBearer {} + +/** + *

+ * Updates an existing feature.
+ * You can't use this operation to update the tags of an existing feature. Instead, use TagResource.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, UpdateFeatureCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, UpdateFeatureCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new UpdateFeatureCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateFeatureCommandInput} for command's `input` shape. + * @see {@link UpdateFeatureCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class UpdateFeatureCommand extends $Command< + UpdateFeatureCommandInput, + UpdateFeatureCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateFeatureCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "UpdateFeatureCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateFeatureRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateFeatureResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateFeatureCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateFeatureCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateFeatureCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/UpdateLaunchCommand.ts b/clients/client-evidently/src/commands/UpdateLaunchCommand.ts new file mode 100644 index 000000000000..d7208690a73c --- /dev/null +++ b/clients/client-evidently/src/commands/UpdateLaunchCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { UpdateLaunchRequest, UpdateLaunchResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateLaunchCommand, + serializeAws_restJson1UpdateLaunchCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface UpdateLaunchCommandInput extends UpdateLaunchRequest {} +export interface UpdateLaunchCommandOutput extends UpdateLaunchResponse, __MetadataBearer {} + +/** + *

+ * Updates a launch of a given feature.
+ * Don't use this operation to update the tags of an existing launch. Instead, use TagResource.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, UpdateLaunchCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, UpdateLaunchCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new UpdateLaunchCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateLaunchCommandInput} for command's `input` shape. + * @see {@link UpdateLaunchCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class UpdateLaunchCommand extends $Command< + UpdateLaunchCommandInput, + UpdateLaunchCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateLaunchCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "UpdateLaunchCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateLaunchRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateLaunchResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateLaunchCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateLaunchCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateLaunchCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/UpdateProjectCommand.ts b/clients/client-evidently/src/commands/UpdateProjectCommand.ts new file mode 100644 index 000000000000..24ad76d13081 --- /dev/null +++ b/clients/client-evidently/src/commands/UpdateProjectCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { UpdateProjectRequest, UpdateProjectResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateProjectCommand, + serializeAws_restJson1UpdateProjectCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface UpdateProjectCommandInput extends UpdateProjectRequest {} +export interface UpdateProjectCommandOutput extends UpdateProjectResponse, __MetadataBearer {} + +/** + *

+ * Updates the description of an existing project.
+ * To create a new project, use CreateProject.
+ * Don't use this operation to update the data storage options of a project. Instead, use UpdateProjectDataDelivery.
+ * Don't use this operation to update the tags of a project. Instead, use TagResource.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, UpdateProjectCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, UpdateProjectCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new UpdateProjectCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateProjectCommandInput} for command's `input` shape. + * @see {@link UpdateProjectCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class UpdateProjectCommand extends $Command< + UpdateProjectCommandInput, + UpdateProjectCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateProjectCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "UpdateProjectCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateProjectRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateProjectResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateProjectCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateProjectCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateProjectCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/UpdateProjectDataDeliveryCommand.ts b/clients/client-evidently/src/commands/UpdateProjectDataDeliveryCommand.ts new file mode 100644 index 000000000000..4268ed6e3b0e --- /dev/null +++ b/clients/client-evidently/src/commands/UpdateProjectDataDeliveryCommand.ts @@ -0,0 +1,102 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { EvidentlyClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../EvidentlyClient"; +import { UpdateProjectDataDeliveryRequest, UpdateProjectDataDeliveryResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1UpdateProjectDataDeliveryCommand, + serializeAws_restJson1UpdateProjectDataDeliveryCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateProjectDataDeliveryCommandInput extends UpdateProjectDataDeliveryRequest {} +export interface UpdateProjectDataDeliveryCommandOutput extends UpdateProjectDataDeliveryResponse, __MetadataBearer {} + +/** + *

+ * Updates the data storage options for this project. If you store evaluation events, you can keep them and analyze them on your own. If you choose not to store evaluation events, Evidently deletes them after using them to produce metrics and other experiment results that you can view.
+ * You can't specify both cloudWatchLogs and s3Destination in the same operation.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { EvidentlyClient, UpdateProjectDataDeliveryCommand } from "@aws-sdk/client-evidently"; // ES Modules import + * // const { EvidentlyClient, UpdateProjectDataDeliveryCommand } = require("@aws-sdk/client-evidently"); // CommonJS import + * const client = new EvidentlyClient(config); + * const command = new UpdateProjectDataDeliveryCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateProjectDataDeliveryCommandInput} for command's `input` shape. + * @see {@link UpdateProjectDataDeliveryCommandOutput} for command's `response` shape. + * @see {@link EvidentlyClientResolvedConfig | config} for EvidentlyClient's `config` shape. + * + */ +export class UpdateProjectDataDeliveryCommand extends $Command< + UpdateProjectDataDeliveryCommandInput, + UpdateProjectDataDeliveryCommandOutput, + EvidentlyClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateProjectDataDeliveryCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: EvidentlyClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "EvidentlyClient"; + const commandName = "UpdateProjectDataDeliveryCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateProjectDataDeliveryRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateProjectDataDeliveryResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateProjectDataDeliveryCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateProjectDataDeliveryCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1UpdateProjectDataDeliveryCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-evidently/src/commands/index.ts b/clients/client-evidently/src/commands/index.ts new file mode 100644 index 000000000000..22a149454534 --- /dev/null +++ b/clients/client-evidently/src/commands/index.ts @@ -0,0 +1,32 @@ +export * from "./BatchEvaluateFeatureCommand"; +export * from "./CreateExperimentCommand"; +export * from "./CreateFeatureCommand"; +export * from "./CreateLaunchCommand"; +export * from "./CreateProjectCommand"; +export * from "./DeleteExperimentCommand"; +export * from "./DeleteFeatureCommand"; +export * from "./DeleteLaunchCommand"; +export * from "./DeleteProjectCommand"; +export * from "./EvaluateFeatureCommand"; +export * from "./GetExperimentCommand"; +export * from "./GetExperimentResultsCommand"; +export * from "./GetFeatureCommand"; +export * from "./GetLaunchCommand"; +export * from "./GetProjectCommand"; 
+export * from "./ListExperimentsCommand"; +export * from "./ListFeaturesCommand"; +export * from "./ListLaunchesCommand"; +export * from "./ListProjectsCommand"; +export * from "./ListTagsForResourceCommand"; +export * from "./PutProjectEventsCommand"; +export * from "./StartExperimentCommand"; +export * from "./StartLaunchCommand"; +export * from "./StopExperimentCommand"; +export * from "./StopLaunchCommand"; +export * from "./TagResourceCommand"; +export * from "./UntagResourceCommand"; +export * from "./UpdateExperimentCommand"; +export * from "./UpdateFeatureCommand"; +export * from "./UpdateLaunchCommand"; +export * from "./UpdateProjectCommand"; +export * from "./UpdateProjectDataDeliveryCommand"; diff --git a/clients/client-evidently/src/endpoints.ts b/clients/client-evidently/src/endpoints.ts new file mode 100644 index 000000000000..6ef6d055ae50 --- /dev/null +++ b/clients/client-evidently/src/endpoints.ts @@ -0,0 +1,207 @@ +import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; +import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; + +const regionHash: RegionHash = { + "ap-northeast-1": { + variants: [ + { + hostname: "evidently.ap-northeast-1.amazonaws.com", + tags: [], + }, + ], + }, + "ap-southeast-1": { + variants: [ + { + hostname: "evidently.ap-southeast-1.amazonaws.com", + tags: [], + }, + ], + }, + "ap-southeast-2": { + variants: [ + { + hostname: "evidently.ap-southeast-2.amazonaws.com", + tags: [], + }, + ], + }, + "eu-central-1": { + variants: [ + { + hostname: "evidently.eu-central-1.amazonaws.com", + tags: [], + }, + ], + }, + "eu-north-1": { + variants: [ + { + hostname: "evidently.eu-north-1.amazonaws.com", + tags: [], + }, + ], + }, + "eu-west-1": { + variants: [ + { + hostname: "evidently.eu-west-1.amazonaws.com", + tags: [], + }, + ], + }, + "us-east-1": { + variants: [ + { + hostname: "evidently.us-east-1.amazonaws.com", + tags: [], + }, + ], + }, + "us-east-2": { + variants: [ + { + hostname: "evidently.us-east-2.amazonaws.com", + tags: [], + }, + ], + }, + "us-west-2": { + variants: [ + { + hostname: "evidently.us-west-2.amazonaws.com", + tags: [], + }, + ], + }, +}; + +const partitionHash: PartitionHash = { + aws: { + regions: [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ], + regionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "evidently.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "evidently-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "evidently-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "evidently.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, + "aws-cn": { + regions: ["cn-north-1", "cn-northwest-1"], + regionRegex: "^cn\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "evidently.{region}.amazonaws.com.cn", + tags: [], + }, + { + hostname: "evidently-fips.{region}.amazonaws.com.cn", + tags: ["fips"], + }, + { + hostname: "evidently-fips.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack", "fips"], + }, + { + hostname: "evidently.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "aws-iso": { + regions: ["us-iso-east-1", "us-iso-west-1"], + 
regionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "evidently.{region}.c2s.ic.gov", + tags: [], + }, + { + hostname: "evidently-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, + ], + }, + "aws-iso-b": { + regions: ["us-isob-east-1"], + regionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "evidently.{region}.sc2s.sgov.gov", + tags: [], + }, + { + hostname: "evidently-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, + ], + }, + "aws-us-gov": { + regions: ["us-gov-east-1", "us-gov-west-1"], + regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "evidently.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "evidently-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "evidently-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "evidently.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, +}; + +export const defaultRegionInfoProvider: RegionInfoProvider = async ( + region: string, + options?: RegionInfoProviderOptions +) => + getRegionInfo(region, { + ...options, + signingService: "evidently", + regionHash, + partitionHash, + }); diff --git a/clients/client-evidently/src/index.ts b/clients/client-evidently/src/index.ts new file mode 100644 index 000000000000..3104664227cd --- /dev/null +++ b/clients/client-evidently/src/index.ts @@ -0,0 +1,5 @@ +export * from "./Evidently"; +export * from "./EvidentlyClient"; +export * from "./commands"; +export * from "./models"; +export * from "./pagination"; diff --git a/clients/client-evidently/src/models/index.ts b/clients/client-evidently/src/models/index.ts new file mode 100644 index 000000000000..09c5d6e09b8c --- /dev/null +++ b/clients/client-evidently/src/models/index.ts @@ -0,0 +1 @@ +export * from "./models_0"; diff --git a/clients/client-evidently/src/models/models_0.ts b/clients/client-evidently/src/models/models_0.ts new file mode 100644 index 000000000000..3c9706f24c8a --- /dev/null +++ b/clients/client-evidently/src/models/models_0.ts @@ -0,0 +1,3548 @@ +import { LazyJsonString as __LazyJsonString } from "@aws-sdk/smithy-client"; +import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; + +/** + *

+ * You do not have sufficient permissions to perform this action.

                                  + */ +export interface AccessDeniedException extends __SmithyException, $MetadataBearer { + name: "AccessDeniedException"; + $fault: "client"; + message?: string; +} + +export namespace AccessDeniedException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccessDeniedException): any => ({ + ...obj, + }); +} + +/** + *

+ * This structure assigns a feature variation to one user session.

                                  + */ +export interface EvaluationRequest { + /** + *

+ * The name of the feature being evaluated.

                                  + */ + feature: string | undefined; + + /** + *

+ * An internal ID that represents a unique user session of the application. This entityID is checked against any override rules assigned for this feature.

                                  + */ + entityId: string | undefined; + + /** + *

+ * A JSON block of attributes that you can optionally pass in. This JSON block is included in the evaluation events sent to Evidently from the user session.

                                  + */ + evaluationContext?: __LazyJsonString | string; +} + +export namespace EvaluationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EvaluationRequest): any => ({ + ...obj, + }); +} + +export interface BatchEvaluateFeatureRequest { + /** + *

+ * The name or ARN of the project that contains the feature being evaluated.

                                  + */ + project: string | undefined; + + /** + *

+ * An array of structures, where each structure assigns a feature variation to one user session.

                                  + */ + requests: EvaluationRequest[] | undefined; +} + +export namespace BatchEvaluateFeatureRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchEvaluateFeatureRequest): any => ({ + ...obj, + }); +} + +/** + *
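For readers skimming the generated shapes, here is a minimal sketch of how a caller might assemble these structures; the region, project name, feature name, and entity IDs are placeholder values rather than anything taken from this change:

```typescript
import { BatchEvaluateFeatureCommand, EvidentlyClient } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

// Each entry in `requests` is an EvaluationRequest: one feature evaluation for one user session.
const command = new BatchEvaluateFeatureCommand({
  project: "my-project", // name or ARN of the project (placeholder)
  requests: [
    {
      feature: "new-checkout-flow",
      entityId: "session-1234",
      // Optional JSON block that is passed through to the evaluation events.
      evaluationContext: JSON.stringify({ plan: "premium" }),
    },
    { feature: "new-checkout-flow", entityId: "session-5678" },
  ],
});

const response = await client.send(command);
```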

+ * The value assigned to a feature variation. This structure must contain exactly one field. It can be boolValue, doubleValue, longValue, or stringValue.

                                  + */ +export type VariableValue = + | VariableValue.BoolValueMember + | VariableValue.DoubleValueMember + | VariableValue.LongValueMember + | VariableValue.StringValueMember + | VariableValue.$UnknownMember; + +export namespace VariableValue { + /** + *

+ * If this feature uses the Boolean variation type, this field contains the Boolean value of this variation.

                                  + */ + export interface BoolValueMember { + boolValue: boolean; + stringValue?: never; + longValue?: never; + doubleValue?: never; + $unknown?: never; + } + + /** + *

+ * If this feature uses the string variation type, this field contains the string value of this variation.

                                  + */ + export interface StringValueMember { + boolValue?: never; + stringValue: string; + longValue?: never; + doubleValue?: never; + $unknown?: never; + } + + /** + *

+ * If this feature uses the long variation type, this field contains the long value of this variation.

                                  + */ + export interface LongValueMember { + boolValue?: never; + stringValue?: never; + longValue: number; + doubleValue?: never; + $unknown?: never; + } + + /** + *

+ * If this feature uses the double variation type, this field contains the double value of this variation.

                                  + */ + export interface DoubleValueMember { + boolValue?: never; + stringValue?: never; + longValue?: never; + doubleValue: number; + $unknown?: never; + } + + export interface $UnknownMember { + boolValue?: never; + stringValue?: never; + longValue?: never; + doubleValue?: never; + $unknown: [string, any]; + } + + export interface Visitor { + boolValue: (value: boolean) => T; + stringValue: (value: string) => T; + longValue: (value: number) => T; + doubleValue: (value: number) => T; + _: (name: string, value: any) => T; + } + + export const visit = (value: VariableValue, visitor: Visitor): T => { + if (value.boolValue !== undefined) return visitor.boolValue(value.boolValue); + if (value.stringValue !== undefined) return visitor.stringValue(value.stringValue); + if (value.longValue !== undefined) return visitor.longValue(value.longValue); + if (value.doubleValue !== undefined) return visitor.doubleValue(value.doubleValue); + return visitor._(value.$unknown[0], value.$unknown[1]); + }; + + /** + * @internal + */ + export const filterSensitiveLog = (obj: VariableValue): any => { + if (obj.boolValue !== undefined) return { boolValue: obj.boolValue }; + if (obj.stringValue !== undefined) return { stringValue: obj.stringValue }; + if (obj.longValue !== undefined) return { longValue: obj.longValue }; + if (obj.doubleValue !== undefined) return { doubleValue: obj.doubleValue }; + if (obj.$unknown !== undefined) return { [obj.$unknown[0]]: "UNKNOWN" }; + }; +} + +/** + *
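Because VariableValue is a tagged union with exactly one populated member, callers typically narrow it through the generated visit helper rather than probing each field. A minimal sketch, using only the members and visitor shown above:

```typescript
import { VariableValue } from "@aws-sdk/client-evidently";

// Convert whichever member is populated into a display string.
const describeValue = (value: VariableValue): string =>
  VariableValue.visit(value, {
    boolValue: (v) => `boolean: ${v}`,
    stringValue: (v) => `string: ${v}`,
    longValue: (v) => `long: ${v}`,
    doubleValue: (v) => `double: ${v}`,
    _: (name) => `unknown member: ${name}`,
  });

console.log(describeValue({ boolValue: true })); // "boolean: true"
console.log(describeValue({ doubleValue: 0.25 })); // "double: 0.25"
```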

+ * This structure displays the results of one feature evaluation assignment to one user session.

                                  + */ +export interface EvaluationResult { + /** + *

+ * The name or ARN of the project that contains the feature being evaluated.

                                  + */ + project?: string; + + /** + *

+ * The name of the feature being evaluated.

                                  + */ + feature: string | undefined; + + /** + *

+ * The name of the variation that was served to the user session.

                                  + */ + variation?: string; + + /** + *

+ * The value assigned to this variation to differentiate it from the other variations of this feature.

                                  + */ + value?: VariableValue; + + /** + *

+ * An internal ID that represents a unique user session of the application.

                                  + */ + entityId: string | undefined; + + /** + *

+ * Specifies the reason that the user session was assigned this variation. Possible values include DEFAULT, meaning the user was served the default variation; LAUNCH_RULE_MATCH, if the user session was enrolled in a launch; or EXPERIMENT_RULE_MATCH, if the user session was enrolled in an experiment.

                                  + */ + reason?: string; + + /** + *

+ * If this user was assigned to a launch or experiment, this field lists the launch or experiment name.

                                  + */ + details?: __LazyJsonString | string; +} + +export namespace EvaluationResult { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EvaluationResult): any => ({ + ...obj, + ...(obj.value && { value: VariableValue.filterSensitiveLog(obj.value) }), + }); +} + +export interface BatchEvaluateFeatureResponse { + /** + *

+ * An array of structures, where each structure displays the results of one feature evaluation assignment to one user session.

                                  + */ + results?: EvaluationResult[]; +} + +export namespace BatchEvaluateFeatureResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchEvaluateFeatureResponse): any => ({ + ...obj, + ...(obj.results && { results: obj.results.map((item) => EvaluationResult.filterSensitiveLog(item)) }), + }); +} + +/** + *

+ * The request references a resource that does not exist.

                                  + */ +export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { + name: "ResourceNotFoundException"; + $fault: "client"; + message?: string; + /** + *

+ * The ID of the resource that caused the exception.

                                  + */ + resourceId?: string; + + /** + *

+ * The type of the resource that is associated with the error.

                                  + */ + resourceType?: string; +} + +export namespace ResourceNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ + ...obj, + }); +} + +/** + *

+ * The request was denied because of request throttling. Retry the request.

                                  + */ +export interface ThrottlingException extends __SmithyException, $MetadataBearer { + name: "ThrottlingException"; + $fault: "client"; + message?: string; + /** + *

+ * The ID of the service that is associated with the error.

                                  + */ + serviceCode?: string; + + /** + *

+ * The ID of the service quota that was exceeded.

                                  + */ + quotaCode?: string; +} + +export namespace ThrottlingException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ThrottlingException): any => ({ + ...obj, + }); +} + +/** + *

+ * A structure containing an error name and message.

                                  + */ +export interface ValidationExceptionField { + /** + *

+ * The error name.

                                  + */ + name: string | undefined; + + /** + *

+ * The error message.

                                  + */ + message: string | undefined; +} + +export namespace ValidationExceptionField { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationExceptionField): any => ({ + ...obj, + }); +} + +export enum ValidationExceptionReason { + CANNOT_PARSE = "cannotParse", + FIELD_VALIDATION_FAILED = "fieldValidationFailed", + OTHER = "other", + UNKNOWN_OPERATION = "unknownOperation", +} + +/** + *

+ * The value of a parameter in the request caused an error.

                                  + */ +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; + $fault: "client"; + message?: string; + /** + *

+ * A reason for the error.

                                  + */ + reason?: ValidationExceptionReason | string; + + /** + *

+ * The parameter that caused the exception.

                                  + */ + fieldList?: ValidationExceptionField[]; +} + +export namespace ValidationException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); +} + +export enum ChangeDirectionEnum { + DECREASE = "DECREASE", + INCREASE = "INCREASE", +} + +/** + *
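These exception shapes all carry a `name` discriminator plus structured detail fields, so a caller can branch on them after a failed `send`. A minimal sketch, assuming EvaluateFeatureCommand accepts the same project/feature/entityId fields shown in the batch request above (the names are placeholders):

```typescript
import { EvaluateFeatureCommand, EvidentlyClient, ValidationException } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

try {
  await client.send(
    new EvaluateFeatureCommand({ project: "my-project", feature: "new-checkout-flow", entityId: "session-1234" })
  );
} catch (err) {
  if ((err as ValidationException).name === "ValidationException") {
    const validation = err as ValidationException;
    console.error("Validation failed:", validation.reason);
    // fieldList identifies the offending parameters, when the service reports them.
    for (const field of validation.fieldList ?? []) {
      console.error(`  ${field.name}: ${field.message}`);
    }
  } else {
    throw err;
  }
}
```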

+ * A structure containing the CloudWatch Logs log group where the project stores evaluation events.

                                  + */ +export interface CloudWatchLogsDestination { + /** + *

+ * The name of the log group where the project stores evaluation events.

                                  + */ + logGroup?: string; +} + +export namespace CloudWatchLogsDestination { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CloudWatchLogsDestination): any => ({ + ...obj, + }); +} + +/** + *

+ * A structure containing the CloudWatch Logs log group where the project stores evaluation events.

                                  + */ +export interface CloudWatchLogsDestinationConfig { + /** + *

+ * The name of the log group where the project stores evaluation events.

                                  + */ + logGroup?: string; +} + +export namespace CloudWatchLogsDestinationConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CloudWatchLogsDestinationConfig): any => ({ + ...obj, + }); +} + +/** + *

+ * A resource was in an inconsistent state during an update or a deletion.

                                  + */ +export interface ConflictException extends __SmithyException, $MetadataBearer { + name: "ConflictException"; + $fault: "client"; + message?: string; + /** + *

+ * The ID of the resource that caused the exception.

                                  + */ + resourceId?: string; + + /** + *

+ * The type of the resource that is associated with the error.

                                  + */ + resourceType?: string; +} + +export namespace ConflictException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConflictException): any => ({ + ...obj, + }); +} + +/** + *

+ * This structure defines a metric that you want to use to evaluate the variations during a launch or experiment.

                                  + */ +export interface MetricDefinitionConfig { + /** + *

                                  A name for the metric.

                                  + */ + name?: string; + + /** + *

                                  The entity, such as a user or session, that does an action that causes a metric + * value to be recorded. An example is userDetails.userID.

                                  + */ + entityIdKey?: string; + + /** + *

                                  The value that is tracked to produce the metric.

                                  + */ + valueKey?: string; + + /** + *

                                  The EventBridge event pattern that defines how the metric is recorded.

                                  + *

                                  For more information about EventBridge event patterns, see + * Amazon EventBridge event patterns.

                                  + */ + eventPattern?: __LazyJsonString | string; + + /** + *

                                  A label for the units that the metric is measuring.

                                  + */ + unitLabel?: string; +} + +export namespace MetricDefinitionConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MetricDefinitionConfig): any => ({ + ...obj, + }); +} + +/** + *
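A minimal MetricDefinitionConfig literal, as a sketch. The userDetails.userID and details.purchaseValue field names are illustrative, and the eventPattern shown is only an assumed EventBridge-style pattern, not a schema confirmed by this file.

import { MetricDefinitionConfig } from "@aws-sdk/client-evidently";

const purchaseMetric: MetricDefinitionConfig = {
  name: "purchaseTotal",
  entityIdKey: "userDetails.userID", // path to the entity that caused the event
  valueKey: "details.purchaseValue", // path to the value being tracked
  unitLabel: "USD",
  // illustrative pattern; see the EventBridge event pattern documentation referenced above
  eventPattern: JSON.stringify({ "details.purchaseValue": [{ exists: true }] }),
};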

                                  Use this structure to tell Evidently whether higher or lower values are desired for a metric that is + * used in an experiment.

                                  + */ +export interface MetricGoalConfig { + /** + *

                                  A structure that contains details about the metric.

                                  + */ + metricDefinition: MetricDefinitionConfig | undefined; + + /** + *

                                  + * INCREASE means that a variation with a higher number for this metric is performing + * better.

                                  + *

                                  + * DECREASE means that a variation with a lower number for this metric is performing + * better.

                                  + */ + desiredChange?: ChangeDirectionEnum | string; +} + +export namespace MetricGoalConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MetricGoalConfig): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains the configuration of which variation to use as the "control" + * version. The "control" version is used for comparison with other variations. This structure + * also specifies how much experiment traffic is allocated to each variation.

                                  + */ +export interface OnlineAbConfig { + /** + *

                                  The name of the variation that is to be the default variation that the other variations are compared to.

                                  + */ + controlTreatmentName?: string; + + /** + *

                                  A set of key-value pairs. The keys are variation names, and the values are the portion + * of experiment traffic to be assigned to that variation. Specify the traffic portion in + * thousandths of a percent, so 20,000 for a variation would allocate 20% of the experiment + * traffic to that variation.

                                  + */ + treatmentWeights?: { [key: string]: number }; +} + +export namespace OnlineAbConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OnlineAbConfig): any => ({ + ...obj, + }); +} + +/** + *
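A worked example of the thousandths-of-a-percent scale described above, as a minimal sketch: a weight of 20_000 allocates 20% of experiment traffic to a variation.

import { OnlineAbConfig } from "@aws-sdk/client-evidently";

const abConfig: OnlineAbConfig = {
  controlTreatmentName: "control",
  treatmentWeights: {
    control: 20_000,  // 20% of experiment traffic
    variantA: 20_000, // 20% of experiment traffic
  },
};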

                                  A structure that defines one treatment in an experiment. A treatment is a variation of the feature + * that you are including in the experiment.

                                  + */ +export interface TreatmentConfig { + /** + *

                                  A name for this treatment.

                                  + */ + name: string | undefined; + + /** + *

                                  A description for this treatment.

                                  + */ + description?: string; + + /** + *

                                  The feature that this experiment is testing.

                                  + */ + feature: string | undefined; + + /** + *

                                  The name of the variation to use as this treatment in the experiment.

                                  + */ + variation: string | undefined; +} + +export namespace TreatmentConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TreatmentConfig): any => ({ + ...obj, + }); +} + +export interface CreateExperimentRequest { + /** + *

                                  The name or ARN of the project that you want to create the new experiment in.

                                  + */ + project: string | undefined; + + /** + *

                                  A name for the new experiment.

                                  + */ + name: string | undefined; + + /** + *

                                  An optional description of the experiment.

                                  + */ + description?: string; + + /** + *

                                  An array of structures that describe the configuration of each feature variation used in the experiment.

                                  + */ + treatments: TreatmentConfig[] | undefined; + + /** + *

                                  An array of structures that defines the metrics used for the experiment, and whether a higher + * or lower value for each metric is the goal.

                                  + */ + metricGoals: MetricGoalConfig[] | undefined; + + /** + *

                                  When Evidently assigns a particular user session to an experiment, it must use a randomization ID + * to determine which variation the user session is served. This randomization ID is a combination of the entity ID + * and randomizationSalt. If you omit randomizationSalt, Evidently uses + * the experiment name as the randomizationSalt.

                                  + */ + randomizationSalt?: string; + + /** + *

                                  The portion of the available audience that you want to allocate to this experiment, in thousandths of a percent. The available audience + * is the total audience minus the audience that you have allocated to overrides or current launches of + * this feature.

                                  + *

                                  This is represented in thousandths of a percent. For example, specify 10,000 to allocate 10% of the available audience.

                                  + */ + samplingRate?: number; + + /** + *

A structure that contains the configuration of which variation to use as the "control" + * version. The "control" version is used for comparison with other variations. This structure + * also specifies how much experiment traffic is allocated to each variation.

                                  + */ + onlineAbConfig?: OnlineAbConfig; + + /** + *

                                  Assigns one or more tags (key-value pairs) to the experiment.

                                  + *

                                  Tags can help you organize and categorize your resources. You can also use them to scope user + * permissions by granting a user + * permission to access or change only resources with certain tag values.

                                  + *

                                  Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                  + * + *

                                  You can associate as many as 50 tags with an experiment.

                                  + *

                                  For more information, see Tagging Amazon Web Services resources.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace CreateExperimentRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateExperimentRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  This structure contains the date and time that the experiment started and ended.

                                  + */ +export interface ExperimentExecution { + /** + *

                                  The date and time that the experiment started.

                                  + */ + startedTime?: Date; + + /** + *

                                  The date and time that the experiment ended.

                                  + */ + endedTime?: Date; +} + +export namespace ExperimentExecution { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentExecution): any => ({ + ...obj, + }); +} + +/** + *

                                  This structure defines a metric that is being used to evaluate the variations + * during a launch or experiment.

                                  + */ +export interface MetricDefinition { + /** + *

                                  The name of the metric.

                                  + */ + name?: string; + + /** + *

                                  The entity, such as a user or session, that does an action that causes a metric + * value to be recorded.

                                  + */ + entityIdKey?: string; + + /** + *

                                  The value that is tracked to produce the metric.

                                  + */ + valueKey?: string; + + /** + *

                                  The EventBridge event pattern that defines how the metric is recorded.

                                  + *

                                  For more information about EventBridge event patterns, see + * Amazon EventBridge event patterns.

                                  + */ + eventPattern?: __LazyJsonString | string; + + /** + *

                                  The label for the units that the metric is measuring.

                                  + */ + unitLabel?: string; +} + +export namespace MetricDefinition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MetricDefinition): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that tells Evidently whether higher or lower values are desired for a metric that is + * used in an experiment.

                                  + */ +export interface MetricGoal { + /** + *

                                  A structure that contains details about the metric.

                                  + */ + metricDefinition: MetricDefinition | undefined; + + /** + *

                                  + * INCREASE means that a variation with a higher number for this metric is performing + * better.

                                  + *

                                  + * DECREASE means that a variation with a lower number for this metric is performing + * better.

                                  + */ + desiredChange?: ChangeDirectionEnum | string; +} + +export namespace MetricGoal { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MetricGoal): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains the configuration of which variation to use as the "control" + * version. The "control" version is used for comparison with other variations. This structure + * also specifies how much experiment traffic is allocated to each variation.

                                  + */ +export interface OnlineAbDefinition { + /** + *

                                  The name of the variation that is the default variation that the other variations are compared to.

                                  + */ + controlTreatmentName?: string; + + /** + *

                                  A set of key-value pairs. The keys are variation names, and the values are the portion + * of experiment traffic to be assigned to that variation. The traffic portion is specified in + * thousandths of a percent, so 20,000 for a variation would allocate 20% of the experiment + * traffic to that variation.

                                  + */ + treatmentWeights?: { [key: string]: number }; +} + +export namespace OnlineAbDefinition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OnlineAbDefinition): any => ({ + ...obj, + }); +} + +/** + *

                                  This structure contains the time and date that Evidently completed the analysis of the experiment.

                                  + */ +export interface ExperimentSchedule { + /** + *

                                  The time and date that Evidently completed the analysis of the experiment.

                                  + */ + analysisCompleteTime?: Date; +} + +export namespace ExperimentSchedule { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentSchedule): any => ({ + ...obj, + }); +} + +export enum ExperimentStatus { + CANCELLED = "CANCELLED", + COMPLETED = "COMPLETED", + CREATED = "CREATED", + RUNNING = "RUNNING", + UPDATING = "UPDATING", +} + +/** + *

                                  A structure that defines one treatment in an experiment. A treatment is a variation of the feature + * that you are including in the experiment.

                                  + */ +export interface Treatment { + /** + *

                                  The name of this treatment.

                                  + */ + name: string | undefined; + + /** + *

                                  The description of the treatment.

                                  + */ + description?: string; + + /** + *

                                  The feature variation used for this treatment. This is a key-value pair. The key is the + * feature name, and the value is the variation name.

                                  + */ + featureVariations?: { [key: string]: string }; +} + +export namespace Treatment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Treatment): any => ({ + ...obj, + }); +} + +export enum ExperimentType { + ONLINE_AB_EXPERIMENT = "aws.evidently.onlineab", +} + +/** + *

                                  A structure containing the configuration details of an experiment.

                                  + */ +export interface Experiment { + /** + *

                                  The ARN of the experiment.

                                  + */ + arn: string | undefined; + + /** + *

                                  The name of the experiment.

                                  + */ + name: string | undefined; + + /** + *

                                  The name or ARN of the project that contains this experiment.

                                  + */ + project?: string; + + /** + *

                                  The current state of the experiment.

                                  + */ + status: ExperimentStatus | string | undefined; + + /** + *

                                  If the experiment was stopped, this is the string that was entered by the person who + * stopped the experiment, to explain why it was stopped.

                                  + */ + statusReason?: string; + + /** + *

                                  A description of the experiment.

                                  + */ + description?: string; + + /** + *

                                  The date and time that the experiment is first created.

                                  + */ + createdTime: Date | undefined; + + /** + *

                                  The date and time that the experiment was most recently updated.

                                  + */ + lastUpdatedTime: Date | undefined; + + /** + *

                                  A structure that contains the time and date that Evidently completed the analysis of the experiment.

                                  + */ + schedule?: ExperimentSchedule; + + /** + *

                                  A structure that contains the date and time that the experiment started and ended.

                                  + */ + execution?: ExperimentExecution; + + /** + *

                                  An array of structures that describe the configuration of each feature variation used in the experiment.

                                  + */ + treatments?: Treatment[]; + + /** + *

                                  An array of structures that defines the metrics used for the experiment, and whether a higher + * or lower value for each metric is the goal.

                                  + */ + metricGoals?: MetricGoal[]; + + /** + *

                                  This value is used when Evidently assigns a particular user session to the experiment. It + * helps create a randomization ID to determine which variation the user session is served. This + * randomization ID is a combination of the entity ID and randomizationSalt.

                                  + */ + randomizationSalt?: string; + + /** + *

                                  In thousandths of a percent, the amount of the available audience that is allocated to this experiment. + * The available audience + * is the total audience minus the audience that you have allocated to overrides or current launches of + * this feature.

                                  + *

                                  This is represented in thousandths of a percent, so a value of 10,000 is 10% of the available audience.

                                  + */ + samplingRate?: number; + + /** + *

The type of this experiment. Currently, this value must be aws.evidently.onlineab.

                                  + */ + type: ExperimentType | string | undefined; + + /** + *

                                  A structure that contains the configuration of which variation to use as the "control" + * version. The "control" version is used for comparison with other variations. This structure + * also specifies how much experiment traffic is allocated to each variation.

                                  + */ + onlineAbDefinition?: OnlineAbDefinition; + + /** + *

                                  The list of tag keys and values associated with this experiment.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace Experiment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Experiment): any => ({ + ...obj, + }); +} + +export interface CreateExperimentResponse { + /** + *

                                  A structure containing the configuration details of the experiment + * that you created.

                                  + */ + experiment: Experiment | undefined; +} + +export namespace CreateExperimentResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateExperimentResponse): any => ({ + ...obj, + }); +} + +/** + *
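Putting the request shapes above together, a minimal CreateExperiment call might look like the sketch below; the project, feature, and variation names are placeholders, and the package is assumed to be consumed as @aws-sdk/client-evidently.

import { EvidentlyClient, CreateExperimentCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

const { experiment } = await client.send(
  new CreateExperimentCommand({
    project: "my-project",
    name: "checkout-experiment",
    treatments: [
      { name: "control", feature: "checkout-flow", variation: "current" },
      { name: "variantA", feature: "checkout-flow", variation: "one-click" },
    ],
    metricGoals: [
      {
        metricDefinition: {
          name: "purchaseTotal",
          entityIdKey: "userDetails.userID",
          valueKey: "details.purchaseValue",
        },
        desiredChange: "INCREASE",
      },
    ],
    onlineAbConfig: {
      controlTreatmentName: "control",
      treatmentWeights: { variantA: 20_000 }, // 20% of experiment traffic
    },
    samplingRate: 10_000, // 10% of the available audience, in thousandths of a percent
  })
);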

                                  The request would cause a service quota to be exceeded.

                                  + */ +export interface ServiceQuotaExceededException extends __SmithyException, $MetadataBearer { + name: "ServiceQuotaExceededException"; + $fault: "client"; + message?: string; + /** + *

                                  The ID of the resource that caused the exception.

                                  + */ + resourceId?: string; + + /** + *

                                  The type of the resource that is associated with the error.

                                  + */ + resourceType?: string; + + /** + *

                                  The ID of the service that is associated with the error.

                                  + */ + serviceCode?: string; + + /** + *

                                  The ID of the service quota that was exceeded.

                                  + */ + quotaCode?: string; +} + +export namespace ServiceQuotaExceededException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServiceQuotaExceededException): any => ({ + ...obj, + }); +} + +export enum FeatureEvaluationStrategy { + ALL_RULES = "ALL_RULES", + DEFAULT_VARIATION = "DEFAULT_VARIATION", +} + +/** + *

                                  This structure contains the name and variation value of one variation of a feature.

                                  + */ +export interface VariationConfig { + /** + *

                                  The name of the variation.

                                  + */ + name: string | undefined; + + /** + *

                                  The value assigned to this variation.

                                  + */ + value: VariableValue | undefined; +} + +export namespace VariationConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: VariationConfig): any => ({ + ...obj, + ...(obj.value && { value: VariableValue.filterSensitiveLog(obj.value) }), + }); +} + +export interface CreateFeatureRequest { + /** + *
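The sketch below builds the variations array for a feature. VariableValue is the tagged union defined earlier in this file; the stringValue member used here is assumed from that definition.

import { VariationConfig } from "@aws-sdk/client-evidently";

const variations: VariationConfig[] = [
  { name: "current", value: { stringValue: "one-step" } }, // stringValue assumed from the VariableValue union
  { name: "one-click", value: { stringValue: "express" } },
];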

                                  The name or ARN of the project that is to contain the new feature.

                                  + */ + project: string | undefined; + + /** + *

                                  The name for the new feature.

                                  + */ + name: string | undefined; + + /** + *

                                  Specify ALL_RULES to activate the traffic allocation specified by any + * ongoing launches or experiments. Specify DEFAULT_VARIATION to serve the default + * variation to all users instead.

                                  + */ + evaluationStrategy?: FeatureEvaluationStrategy | string; + + /** + *

                                  An optional description of the feature.

                                  + */ + description?: string; + + /** + *

                                  An array of structures that contain the configuration of the feature's different variations.

                                  + */ + variations: VariationConfig[] | undefined; + + /** + *

                                  The name of the variation to use as the default variation. The default + * variation is served to users who are not allocated to any ongoing launches + * or experiments of this feature.

                                  + *

                                  This variation must also be listed in the variations structure.

                                  + *

                                  If you omit defaultVariation, the first variation listed in + * the variations structure is used as the default variation.

                                  + */ + defaultVariation?: string; + + /** + *

                                  Assigns one or more tags (key-value pairs) to the feature.

                                  + *

                                  Tags can help you organize and categorize your resources. You can also use them to scope user + * permissions by granting a user + * permission to access or change only resources with certain tag values.

                                  + *

                                  Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                  + * + *

                                  You can associate as many as 50 tags with a feature.

                                  + *

                                  For more information, see Tagging Amazon Web Services resources.

                                  + */ + tags?: { [key: string]: string }; + + /** + *

Specify users that should always be served a specific variation of a feature. Each user + * is specified by a key-value pair. For each key, specify a user by entering their user ID, + * account ID, or some other identifier. For the value, specify the name of the variation that + * they are to be served.

                                  + */ + entityOverrides?: { [key: string]: string }; +} + +export namespace CreateFeatureRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateFeatureRequest): any => ({ + ...obj, + ...(obj.variations && { variations: obj.variations.map((item) => VariationConfig.filterSensitiveLog(item)) }), + }); +} + +/** + *

                                  A structure that contains the information about an evaluation rule for this feature, + * if it is used in a launch or experiment.

                                  + */ +export interface EvaluationRule { + /** + *

                                  The name of the experiment or launch.

                                  + */ + name?: string; + + /** + *

                                  This value is aws.evidently.splits if this is an evaluation rule for a launch, and it is + * aws.evidently.onlineab if this is an evaluation rule for an experiment.

                                  + */ + type: string | undefined; +} + +export namespace EvaluationRule { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EvaluationRule): any => ({ + ...obj, + }); +} + +export enum FeatureStatus { + AVAILABLE = "AVAILABLE", + UPDATING = "UPDATING", +} + +export enum VariationValueType { + BOOLEAN = "BOOLEAN", + DOUBLE = "DOUBLE", + LONG = "LONG", + STRING = "STRING", +} + +/** + *

                                  This structure contains the name and variation value of one variation of a feature.

                                  + */ +export interface Variation { + /** + *

                                  The name of the variation.

                                  + */ + name?: string; + + /** + *

                                  The value assigned to this variation.

                                  + */ + value?: VariableValue; +} + +export namespace Variation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Variation): any => ({ + ...obj, + ...(obj.value && { value: VariableValue.filterSensitiveLog(obj.value) }), + }); +} + +/** + *

                                  This structure contains information about one Evidently feature in your account.

                                  + */ +export interface Feature { + /** + *

                                  The ARN of the feature.

                                  + */ + arn: string | undefined; + + /** + *

                                  The name of the feature.

                                  + */ + name: string | undefined; + + /** + *

                                  The name or ARN of the project that contains the feature.

                                  + */ + project?: string; + + /** + *

                                  The current state of the feature.

                                  + */ + status: FeatureStatus | string | undefined; + + /** + *

                                  The date and time that the feature is created.

                                  + */ + createdTime: Date | undefined; + + /** + *

                                  The date and time that the feature was most recently updated.

                                  + */ + lastUpdatedTime: Date | undefined; + + /** + *

                                  The description of the feature.

                                  + */ + description?: string; + + /** + *

                                  If this value is ALL_RULES, the traffic allocation specified by + * any ongoing launches or experiments is being used. If this is DEFAULT_VARIATION, + * the default variation is being served to all users.

                                  + */ + evaluationStrategy: FeatureEvaluationStrategy | string | undefined; + + /** + *

Defines the type of value used to define the different feature variations. + * For more information, see Variation types. + *

                                  + */ + valueType: VariationValueType | string | undefined; + + /** + *

                                  An array of structures that contain the configuration of the feature's different variations.

                                  + */ + variations: Variation[] | undefined; + + /** + *

                                  The name of the variation that is used as the default variation. The default + * variation is served to users who are not allocated to any ongoing launches + * or experiments of this feature.

                                  + *

                                  This variation must also be listed in the variations structure.

                                  + *

                                  If you omit defaultVariation, the first variation listed in + * the variations structure is used as the default variation.

                                  + */ + defaultVariation?: string; + + /** + *

                                  An array of structures that define the evaluation rules for the feature.

                                  + */ + evaluationRules?: EvaluationRule[]; + + /** + *

                                  The list of tag keys and values associated with this feature.

                                  + */ + tags?: { [key: string]: string }; + + /** + *

                                  A set of key-value pairs that specify users who should always be served a specific + * variation of a feature. Each key specifies a user using their user ID, account ID, or some + * other identifier. The value specifies the name of the variation that the user is to be + * served.

                                  + *

                                  For the override to be successful, the value of the key must match the entityId used + * in the EvaluateFeature operation.

                                  + */ + entityOverrides?: { [key: string]: string }; +} + +export namespace Feature { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Feature): any => ({ + ...obj, + ...(obj.variations && { variations: obj.variations.map((item) => Variation.filterSensitiveLog(item)) }), + }); +} + +export interface CreateFeatureResponse { + /** + *

                                  A structure that contains information about the new feature.

                                  + */ + feature?: Feature; +} + +export namespace CreateFeatureResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateFeatureResponse): any => ({ + ...obj, + ...(obj.feature && { feature: Feature.filterSensitiveLog(obj.feature) }), + }); +} + +/** + *
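A minimal CreateFeature call, as a sketch; the variation values again assume the stringValue member of the VariableValue union defined earlier in this file, and all names are placeholders.

import { EvidentlyClient, CreateFeatureCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

const { feature } = await client.send(
  new CreateFeatureCommand({
    project: "my-project",
    name: "checkout-flow",
    variations: [
      { name: "current", value: { stringValue: "one-step" } },
      { name: "one-click", value: { stringValue: "express" } },
    ],
    defaultVariation: "current",
    // always serve a specific variation to a test entity, regardless of launches or experiments
    entityOverrides: { "test-account-123": "one-click" },
  })
);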

                                  A structure that defines one launch group in a launch. A launch group is a variation of + * the feature that you are including in the launch.

                                  + */ +export interface LaunchGroupConfig { + /** + *

                                  A name for this launch group.

                                  + */ + name: string | undefined; + + /** + *

                                  A description of the launch group.

                                  + */ + description?: string; + + /** + *

                                  The feature that this launch is using.

                                  + */ + feature: string | undefined; + + /** + *

                                  The feature variation to use for this launch group.

                                  + */ + variation: string | undefined; +} + +export namespace LaunchGroupConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LaunchGroupConfig): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that defines a metric to be used to monitor performance of the variations during a launch.

                                  + */ +export interface MetricMonitorConfig { + /** + *

                                  A structure that defines the metric.

                                  + */ + metricDefinition: MetricDefinitionConfig | undefined; +} + +export namespace MetricMonitorConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MetricMonitorConfig): any => ({ + ...obj, + }); +} + +/** + *

                                  This structure defines the traffic allocation percentages among the feature + * variations during one step of a launch, and the start time of that step.

                                  + */ +export interface ScheduledSplitConfig { + /** + *

                                  The date and time that this step of the launch starts.

                                  + */ + startTime: Date | undefined; + + /** + *

                                  The traffic allocation percentages among the feature variations during one step of a + * launch. This is a set of key-value pairs. The keys are variation names. The values represent + * the percentage of traffic to allocate to that variation during this step.

                                  + */ + groupWeights: { [key: string]: number } | undefined; +} + +export namespace ScheduledSplitConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScheduledSplitConfig): any => ({ + ...obj, + }); +} + +/** + *

                                  An array of structures that define the traffic allocation percentages among the feature + * variations during each step of a launch. This also defines the start time of each step.

                                  + */ +export interface ScheduledSplitsLaunchConfig { + /** + *

                                  An array of structures that define the traffic allocation percentages among the feature + * variations during each step of the launch. This also defines the start time of each + * step.

                                  + */ + steps: ScheduledSplitConfig[] | undefined; +} + +export namespace ScheduledSplitsLaunchConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScheduledSplitsLaunchConfig): any => ({ + ...obj, + }); +} + +export interface CreateLaunchRequest { + /** + *
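A two-step schedule, as a minimal sketch: each ScheduledSplitConfig pairs a start time with the groupWeights map described above (the dates and weight values are illustrative).

import { ScheduledSplitsLaunchConfig } from "@aws-sdk/client-evidently";

const schedule: ScheduledSplitsLaunchConfig = {
  steps: [
    {
      startTime: new Date("2021-12-06T00:00:00Z"),
      groupWeights: { "one-click-group": 10_000 }, // illustrative weight for the first step
    },
    {
      startTime: new Date("2021-12-13T00:00:00Z"),
      groupWeights: { "one-click-group": 50_000 }, // ramp up in the second step
    },
  ],
};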

                                  The name or ARN of the project that you want to create the launch in.

                                  + */ + project: string | undefined; + + /** + *

                                  The name for the new launch.

                                  + */ + name: string | undefined; + + /** + *

                                  An optional description for the launch.

                                  + */ + description?: string; + + /** + *

                                  An array of structures that define the traffic allocation percentages among the feature + * variations during each step of the launch.

                                  + */ + scheduledSplitsConfig?: ScheduledSplitsLaunchConfig; + + /** + *

                                  An array of structures that define the metrics that will be used to monitor + * the launch performance.

                                  + */ + metricMonitors?: MetricMonitorConfig[]; + + /** + *

                                  An array of structures that contains the feature and variations that are to be used for the launch.

                                  + */ + groups: LaunchGroupConfig[] | undefined; + + /** + *

When Evidently assigns a particular user session to a launch, it must use a randomization ID + * to determine which variation the user session is served. This randomization ID is a combination of the entity ID + * and randomizationSalt. If you omit randomizationSalt, Evidently uses + * the launch name as the randomizationSalt.

                                  + */ + randomizationSalt?: string; + + /** + *

                                  Assigns one or more tags (key-value pairs) to the launch.

                                  + *

                                  Tags can help you organize and categorize your resources. You can also use them to scope user + * permissions by granting a user + * permission to access or change only resources with certain tag values.

                                  + *

                                  Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                  + * + *

                                  You can associate as many as 50 tags with a launch.

                                  + *

                                  For more information, see Tagging Amazon Web Services resources.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace CreateLaunchRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateLaunchRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  This structure contains information about the start and end times of the launch.

                                  + */ +export interface LaunchExecution { + /** + *

                                  The date and time that the launch started.

                                  + */ + startedTime?: Date; + + /** + *

                                  The date and time that the launch ended.

                                  + */ + endedTime?: Date; +} + +export namespace LaunchExecution { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LaunchExecution): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that defines one launch group in a launch. A launch group is a variation of the feature + * that you are including in the launch.

                                  + */ +export interface LaunchGroup { + /** + *

                                  The name of the launch group.

                                  + */ + name: string | undefined; + + /** + *

                                  A description of the launch group.

                                  + */ + description?: string; + + /** + *

                                  The feature variation for this launch group. This is a key-value pair.

                                  + */ + featureVariations: { [key: string]: string } | undefined; +} + +export namespace LaunchGroup { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LaunchGroup): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that defines a metric to be used to monitor performance of the variations during a launch.

                                  + */ +export interface MetricMonitor { + /** + *

                                  A structure that defines the metric.

                                  + */ + metricDefinition: MetricDefinition | undefined; +} + +export namespace MetricMonitor { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MetricMonitor): any => ({ + ...obj, + }); +} + +/** + *

                                  This structure defines the traffic allocation percentages among the feature + * variations during one step of a launch, and the start time of that step.

                                  + */ +export interface ScheduledSplit { + /** + *

                                  The date and time that this step of the launch starts.

                                  + */ + startTime: Date | undefined; + + /** + *

                                  The traffic allocation percentages among the feature variations during one step of a + * launch. This is a set of key-value pairs. The keys are variation names. The values represent + * the percentage of traffic to allocate to that variation during this step.

                                  + */ + groupWeights?: { [key: string]: number }; +} + +export namespace ScheduledSplit { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScheduledSplit): any => ({ + ...obj, + }); +} + +/** + *

                                  An array of structures that define the traffic allocation percentages among the feature + * variations during each step of a launch. This also defines the start time of each step.

                                  + */ +export interface ScheduledSplitsLaunchDefinition { + /** + *

                                  An array of structures that define the traffic allocation percentages among the feature + * variations during each step of the launch. This also defines the start time of each + * step.

                                  + */ + steps?: ScheduledSplit[]; +} + +export namespace ScheduledSplitsLaunchDefinition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScheduledSplitsLaunchDefinition): any => ({ + ...obj, + }); +} + +export enum LaunchStatus { + CANCELLED = "CANCELLED", + COMPLETED = "COMPLETED", + CREATED = "CREATED", + RUNNING = "RUNNING", + UPDATING = "UPDATING", +} + +export enum LaunchType { + SCHEDULED_SPLITS_LAUNCH = "aws.evidently.splits", +} + +/** + *

                                  This structure contains the configuration details of one Evidently launch.

                                  + */ +export interface Launch { + /** + *

                                  The ARN of the launch.

                                  + */ + arn: string | undefined; + + /** + *

                                  The name of the launch.

                                  + */ + name: string | undefined; + + /** + *

                                  The name or ARN of the project that contains the launch.

                                  + */ + project?: string; + + /** + *

                                  The current state of the launch.

                                  + */ + status: LaunchStatus | string | undefined; + + /** + *

                                  If the launch was stopped, this is the string that was entered by the person who + * stopped the launch, to explain why it was stopped.

                                  + */ + statusReason?: string; + + /** + *

                                  The description of the launch.

                                  + */ + description?: string; + + /** + *

                                  The date and time that the launch is created.

                                  + */ + createdTime: Date | undefined; + + /** + *

                                  The date and time that the launch was most recently updated.

                                  + */ + lastUpdatedTime: Date | undefined; + + /** + *

                                  A structure that contains information about the start and end times of the launch.

                                  + */ + execution?: LaunchExecution; + + /** + *

                                  An array of structures that define the feature variations that are being used in the launch.

                                  + */ + groups?: LaunchGroup[]; + + /** + *

                                  An array of structures that define the metrics that are being used to monitor the launch performance.

                                  + */ + metricMonitors?: MetricMonitor[]; + + /** + *

                                  This value is used when Evidently assigns a particular user session to the launch, to help create a randomization ID + * to determine which variation the user session is served. This randomization ID is a combination of the entity ID + * and randomizationSalt.

                                  + */ + randomizationSalt?: string; + + /** + *

                                  The type of launch.

                                  + */ + type: LaunchType | string | undefined; + + /** + *

                                  An array of structures that define the traffic allocation percentages among the feature + * variations during each step of the launch.

                                  + */ + scheduledSplitsDefinition?: ScheduledSplitsLaunchDefinition; + + /** + *

                                  The list of tag keys and values associated with this launch.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace Launch { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Launch): any => ({ + ...obj, + }); +} + +export interface CreateLaunchResponse { + /** + *

                                  A structure that contains the configuration of the launch that was created.

                                  + */ + launch: Launch | undefined; +} + +export namespace CreateLaunchResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateLaunchResponse): any => ({ + ...obj, + }); +} + +/** + *
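A minimal CreateLaunch call combining the group, metric monitor, and scheduled-split shapes above; all names, dates, and weights are placeholders.

import { EvidentlyClient, CreateLaunchCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

const { launch } = await client.send(
  new CreateLaunchCommand({
    project: "my-project",
    name: "one-click-rollout",
    groups: [{ name: "one-click-group", feature: "checkout-flow", variation: "one-click" }],
    metricMonitors: [
      {
        metricDefinition: {
          name: "purchaseTotal",
          entityIdKey: "userDetails.userID",
          valueKey: "details.purchaseValue",
        },
      },
    ],
    scheduledSplitsConfig: {
      steps: [
        {
          startTime: new Date("2021-12-06T00:00:00Z"),
          groupWeights: { "one-click-group": 10_000 }, // illustrative weight
        },
      ],
    },
  })
);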

                                  If the project stores evaluation events in an Amazon S3 bucket, this structure + * stores the bucket name and bucket prefix.

                                  + */ +export interface S3DestinationConfig { + /** + *

                                  The name of the bucket in which Evidently stores evaluation events.

                                  + */ + bucket?: string; + + /** + *

                                  The bucket prefix in which Evidently stores evaluation events.

                                  + */ + prefix?: string; +} + +export namespace S3DestinationConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: S3DestinationConfig): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains information about where Evidently is to store + * evaluation events for longer term storage.

                                  + */ +export interface ProjectDataDeliveryConfig { + /** + *

                                  If the project stores evaluation events in an Amazon S3 bucket, this structure + * stores the bucket name and bucket prefix.

                                  + */ + s3Destination?: S3DestinationConfig; + + /** + *

                                  If the project stores evaluation events in CloudWatch Logs, this structure + * stores the log group name.

                                  + */ + cloudWatchLogs?: CloudWatchLogsDestinationConfig; +} + +export namespace ProjectDataDeliveryConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProjectDataDeliveryConfig): any => ({ + ...obj, + }); +} + +export interface CreateProjectRequest { + /** + *

                                  The name for the project.

                                  + */ + name: string | undefined; + + /** + *

                                  An optional description of the project.

                                  + */ + description?: string; + + /** + *

                                  A structure that contains information about where Evidently is to store + * evaluation events for longer term storage, if you choose to do so. If you choose + * not to store these events, Evidently deletes them after using them to produce metrics and other experiment + * results that you can view.

                                  + */ + dataDelivery?: ProjectDataDeliveryConfig; + + /** + *

                                  Assigns one or more tags (key-value pairs) to the project.

                                  + *

                                  Tags can help you organize and categorize your resources. You can also use them to scope user + * permissions by granting a user + * permission to access or change only resources with certain tag values.

                                  + *

                                  Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                  + * + *

                                  You can associate as many as 50 tags with a project.

                                  + *

                                  For more information, see Tagging Amazon Web Services resources.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace CreateProjectRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateProjectRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  If the project stores evaluation events in an Amazon S3 bucket, this structure + * stores the bucket name and bucket prefix.

                                  + */ +export interface S3Destination { + /** + *

                                  The name of the bucket in which Evidently stores evaluation events.

                                  + */ + bucket?: string; + + /** + *

                                  The bucket prefix in which Evidently stores evaluation events.

                                  + */ + prefix?: string; +} + +export namespace S3Destination { + /** + * @internal + */ + export const filterSensitiveLog = (obj: S3Destination): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains information about where Evidently is to store + * evaluation events for longer term storage.

                                  + */ +export interface ProjectDataDelivery { + /** + *

                                  If the project stores evaluation events in an Amazon S3 bucket, this structure + * stores the bucket name and bucket prefix.

                                  + */ + s3Destination?: S3Destination; + + /** + *

                                  If the project stores evaluation events in CloudWatch Logs, this structure + * stores the log group name.

                                  + */ + cloudWatchLogs?: CloudWatchLogsDestination; +} + +export namespace ProjectDataDelivery { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProjectDataDelivery): any => ({ + ...obj, + }); +} + +export enum ProjectStatus { + AVAILABLE = "AVAILABLE", + UPDATING = "UPDATING", +} + +/** + *

                                  This structure defines a project, which is the logical object in Evidently that can contain features, launches, and + * experiments. Use projects to group similar features together.

                                  + */ +export interface Project { + /** + *

The ARN of the project.

                                  + */ + arn: string | undefined; + + /** + *

                                  The name of the project.

                                  + */ + name: string | undefined; + + /** + *

                                  The current state of the project.

                                  + */ + status: ProjectStatus | string | undefined; + + /** + *

                                  The user-entered description of the project.

                                  + */ + description?: string; + + /** + *

                                  The date and time that the project is created.

                                  + */ + createdTime: Date | undefined; + + /** + *

                                  The date and time that the project was most recently updated.

                                  + */ + lastUpdatedTime: Date | undefined; + + /** + *

                                  The number of features currently in the project.

                                  + */ + featureCount?: number; + + /** + *

                                  The number of launches currently in the project. This includes all launches that have been created + * and not deleted, whether they are ongoing or not.

                                  + */ + launchCount?: number; + + /** + *

                                  The number of ongoing launches currently in the project.

                                  + */ + activeLaunchCount?: number; + + /** + *

                                  The number of experiments currently in the project. This includes all experiments that have been created + * and not deleted, whether they are ongoing or not.

                                  + */ + experimentCount?: number; + + /** + *

                                  The number of ongoing experiments currently in the project.

                                  + */ + activeExperimentCount?: number; + + /** + *

                                  A structure that contains information about where Evidently is to store + * evaluation events for longer term storage.

                                  + */ + dataDelivery?: ProjectDataDelivery; + + /** + *

                                  The list of tag keys and values associated with this project.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace Project { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Project): any => ({ + ...obj, + }); +} + +export interface CreateProjectResponse { + /** + *

                                  A structure that contains information about the created project.

                                  + */ + project: Project | undefined; +} + +export namespace CreateProjectResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateProjectResponse): any => ({ + ...obj, + }); +} + +export interface DeleteExperimentRequest { + /** + *
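A minimal CreateProject call, as a sketch; the CloudWatch Logs log group name is a placeholder, and storing evaluation events is optional as described above.

import { EvidentlyClient, CreateProjectCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

const { project } = await client.send(
  new CreateProjectCommand({
    name: "my-project",
    description: "Checkout experiments and launches",
    dataDelivery: {
      cloudWatchLogs: { logGroup: "/aws/evidently/my-project" }, // placeholder log group
    },
  })
);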

                                  The name or ARN of the project that contains the experiment to delete.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the experiment to delete.

                                  + */ + experiment: string | undefined; +} + +export namespace DeleteExperimentRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteExperimentRequest): any => ({ + ...obj, + }); +} + +export interface DeleteExperimentResponse {} + +export namespace DeleteExperimentResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteExperimentResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  Unexpected error while processing the request. Retry the request.

                                  + */ +export interface InternalServerException extends __SmithyException, $MetadataBearer { + name: "InternalServerException"; + $fault: "server"; + message?: string; +} + +export namespace InternalServerException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InternalServerException): any => ({ + ...obj, + }); +} + +/** + *

                                  The service was unavailable. Retry the request.

                                  + */ +export interface ServiceUnavailableException extends __SmithyException, $MetadataBearer { + name: "ServiceUnavailableException"; + $fault: "server"; + message?: string; +} + +export namespace ServiceUnavailableException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServiceUnavailableException): any => ({ + ...obj, + }); +} + +export interface DeleteFeatureRequest { + /** + *

                                  The name or ARN of the project that contains the feature to delete.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the feature to delete.

                                  + */ + feature: string | undefined; +} + +export namespace DeleteFeatureRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteFeatureRequest): any => ({ + ...obj, + }); +} + +export interface DeleteFeatureResponse {} + +export namespace DeleteFeatureResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteFeatureResponse): any => ({ + ...obj, + }); +} + +export interface DeleteLaunchRequest { + /** + *

                                  The name or ARN of the project that contains the launch to delete.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the launch to delete.

                                  + */ + launch: string | undefined; +} + +export namespace DeleteLaunchRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteLaunchRequest): any => ({ + ...obj, + }); +} + +export interface DeleteLaunchResponse {} + +export namespace DeleteLaunchResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteLaunchResponse): any => ({ + ...obj, + }); +} + +export interface DeleteProjectRequest { + /** + *

                                  The name or ARN of the project to delete.

                                  + */ + project: string | undefined; +} + +export namespace DeleteProjectRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteProjectRequest): any => ({ + ...obj, + }); +} + +export interface DeleteProjectResponse {} + +export namespace DeleteProjectResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteProjectResponse): any => ({ + ...obj, + }); +} + +export interface EvaluateFeatureRequest { + /** + *

                                  The name or ARN of the project that contains this feature.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the feature being evaluated.

                                  + */ + feature: string | undefined; + + /** + *

                                  An internal ID that represents a unique user of the application. This + * entityID is checked against any override rules assigned for this + * feature.

                                  + */ + entityId: string | undefined; + + /** + *

                                  A JSON block of attributes that you can optionally pass in. This JSON block is included + * in the evaluation events sent to Evidently from the user session.

                                  + */ + evaluationContext?: __LazyJsonString | string; +} + +export namespace EvaluateFeatureRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EvaluateFeatureRequest): any => ({ + ...obj, + }); +} + +export interface EvaluateFeatureResponse { + /** + *

                                  The name of the variation that was served to the user session.

                                  + */ + variation?: string; + + /** + *

                                  The value assigned to this variation to differentiate it from the other variations of this feature.

                                  + */ + value?: VariableValue; + + /** + *

                                  Specifies the reason that the user session was assigned this variation. Possible values + * include DEFAULT, meaning the user was served the default variation; + * LAUNCH_RULE_MATCH, if the user session was enrolled in a launch; + * EXPERIMENT_RULE_MATCH, if the user session was enrolled in an experiment; or + * ENTITY_OVERRIDES_MATCH, if the user's entityId matches an override + * rule.

                                  + */ + reason?: string; + + /** + *

                                  If this user was assigned to a launch or experiment, this field lists the launch or experiment name.

                                  + */ + details?: __LazyJsonString | string; +} + +export namespace EvaluateFeatureResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EvaluateFeatureResponse): any => ({ + ...obj, + ...(obj.value && { value: VariableValue.filterSensitiveLog(obj.value) }), + }); +} + +export enum EventType { + CUSTOM = "aws.evidently.custom", + EVALUATION = "aws.evidently.evaluation", +} + +/** + *
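As a usage illustration of the EvaluateFeatureRequest and EvaluateFeatureResponse shapes above, here is a minimal sketch that is not part of the generated code. It assumes the package is published as @aws-sdk/client-evidently like the other clients in this change; the region, project, feature, and entity ID are placeholder values.

```ts
import { EvidentlyClient, EvaluateFeatureCommand } from "@aws-sdk/client-evidently";

// Sketch only: evaluate a feature for one user session.
// "my-project", "checkout-redesign", and "user-1234" are hypothetical values.
async function evaluateFeatureForUser(): Promise<void> {
  const client = new EvidentlyClient({ region: "us-west-2" });

  const response = await client.send(
    new EvaluateFeatureCommand({
      project: "my-project",        // name or ARN of the project
      feature: "checkout-redesign", // name of the feature being evaluated
      entityId: "user-1234",        // internal ID that represents the user session
      // Optional JSON block included in the evaluation events sent to Evidently.
      evaluationContext: JSON.stringify({ plan: "premium" }),
    })
  );

  // reason is DEFAULT, LAUNCH_RULE_MATCH, EXPERIMENT_RULE_MATCH, or ENTITY_OVERRIDES_MATCH.
  console.log(response.variation, response.reason);
}
```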

                                  A structure that contains the information about one evaluation event or custom event sent to Evidently. + * This is a JSON payload. If this event specifies a pre-defined event type, the payload must follow the + * defined event schema.

                                  + */ +export interface Event { + /** + *

                                  The timestamp of the event.

                                  + */ + timestamp: Date | undefined; + + /** + *

                                  + * aws.evidently.evaluation specifies an evaluation event, which determines + * which feature variation that a user sees. aws.evidently.custom specifies a custom + * event, which generates metrics from user actions such as clicks and checkouts.

                                  + */ + type: EventType | string | undefined; + + /** + *

                                  The event data.

                                  + */ + data: __LazyJsonString | string | undefined; +} + +export namespace Event { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Event): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceRequest { + /** + *

                                  The ARN of the resource that you want to see the tags of.

                                  + */ + resourceArn: string | undefined; +} + +export namespace ListTagsForResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceRequest): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceResponse { + /** + *

                                  The list of tag keys and values associated with the resource you specified.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace ListTagsForResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceResponse): any => ({ + ...obj, + }); +} + +export interface GetExperimentRequest { + /** + *

                                  The name or ARN of the project that contains the experiment.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the experiment that you want to see the details of.

                                  + */ + experiment: string | undefined; +} + +export namespace GetExperimentRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetExperimentRequest): any => ({ + ...obj, + }); +} + +export interface GetExperimentResponse { + /** + *

                                  A structure containing the configuration details of the experiment.

                                  + */ + experiment?: Experiment; +} + +export namespace GetExperimentResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetExperimentResponse): any => ({ + ...obj, + }); +} + +export enum ExperimentBaseStat { + MEAN = "Mean", +} + +export enum ExperimentReportName { + BAYESIAN_INFERENCE = "BayesianInference", +} + +export enum ExperimentResultRequestType { + BASE_STAT = "BaseStat", + CONFIDENCE_INTERVAL = "ConfidenceInterval", + P_VALUE = "PValue", + TREATMENT_EFFECT = "TreatmentEffect", +} + +export interface GetExperimentResultsRequest { + /** + *

                                  The name or ARN of the project that contains the experiment that you want to see the results of.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the experiment to retrieve the results of.

                                  + */ + experiment: string | undefined; + + /** + *

                                  The date and time that the experiment started.

                                  + */ + startTime?: Date; + + /** + *

                                  The date and time that the experiment ended, if it is completed.

                                  + */ + endTime?: Date; + + /** + *

                                  The names of the experiment metrics that you want to see the results of.

                                  + */ + metricNames: string[] | undefined; + + /** + *

                                  The names of the experiment treatments that you want to see the results for.

                                  + */ + treatmentNames: string[] | undefined; + + /** + *

                                  The statistic used to calculate experiment results. Currently the only valid value is mean, + * which uses the mean of the collected values as the statistic.

                                  + */ + baseStat?: ExperimentBaseStat | string; + + /** + *

                                  The statistics that you want to see in the returned results.

                                  + *
                                  + * • PValue specifies to use p-values for the results. A p-value is used in hypothesis + * testing to measure how often you are willing to make a mistake in rejecting the null + * hypothesis. A general practice is to reject the null hypothesis and declare that the + * results are statistically significant when the p-value is less than 0.05.

                                  + * • ConfidenceInterval specifies a confidence interval for the results. The + * confidence interval represents the range of values for the chosen metric that is likely to + * contain the true difference between the baseStat of a variation and the + * baseline. Evidently returns the 95% confidence interval.

                                  + * • TreatmentEffect is the difference in the statistic specified by the + * baseStat parameter between each variation and the default variation.

                                  + * • BaseStat returns the statistical values collected for the metric for each + * variation. The statistic uses the same statistic specified in the baseStat + * parameter. Therefore, if baseStat is mean, this returns the mean + * of the values collected for each variation.
                                  + *
                                  + */ + resultStats?: (ExperimentResultRequestType | string)[]; + + /** + *

                                  The names of the report types that you want to see. Currently, BayesianInference + * is the only valid value.

                                  + */ + reportNames?: (ExperimentReportName | string)[]; + + /** + *

                                  In seconds, the amount of time to aggregate results together.

                                  + */ + period?: number; +} + +export namespace GetExperimentResultsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetExperimentResultsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains results of an experiment.

                                  + */ +export interface ExperimentReport { + /** + *

                                  The name of the metric that is analyzed in this experiment report.

                                  + */ + metricName?: string; + + /** + *

                                  The name of the variation that this report pertains to.

                                  + */ + treatmentName?: string; + + /** + *

                                  The type of analysis used for this report.

                                  + */ + reportName?: ExperimentReportName | string; + + /** + *

                                  The content of the report.

                                  + */ + content?: __LazyJsonString | string; +} + +export namespace ExperimentReport { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentReport): any => ({ + ...obj, + }); +} + +export enum ExperimentResultResponseType { + CONFIDENCE_INTERVAL_LOWERBOUND = "ConfidenceIntervalLowerBound", + CONFIDENCE_INTERVAL_UPPERBOUND = "ConfidenceIntervalUpperBound", + MEAN = "Mean", + P_VALUE = "PValue", + TREATMENT_EFFECT = "TreatmentEffect", +} + +/** + *

                                  A structure that contains experiment results for one metric that is monitored in + * the experiment.

                                  + */ +export interface ExperimentResultsData { + /** + *

                                  The name of the metric.

                                  + */ + metricName?: string; + + /** + *

                                  The treatment, or variation, that returned the values in this structure.

                                  + */ + treatmentName?: string; + + /** + *

                                  The experiment statistic that these results pertain to.

                                  + */ + resultStat?: ExperimentResultResponseType | string; + + /** + *

                                  The values for the metricName that were recorded in the experiment.

                                  + */ + values?: number[]; +} + +export namespace ExperimentResultsData { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExperimentResultsData): any => ({ + ...obj, + }); +} + +export interface GetExperimentResultsResponse { + /** + *

                                  An array of structures that include experiment results including metric names and values.

                                  + */ + resultsData?: ExperimentResultsData[]; + + /** + *

                                  An array of structures that include the reports that you requested.

                                  + */ + reports?: ExperimentReport[]; + + /** + *

                                  The timestamps of each result returned.

                                  + */ + timestamps?: Date[]; +} + +export namespace GetExperimentResultsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetExperimentResultsResponse): any => ({ + ...obj, + }); +} + +export interface ListExperimentsRequest { + /** + *
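To illustrate the GetExperimentResultsRequest/Response shapes above, the following sketch (not part of the generated code) requests p-values and confidence intervals for one metric and one treatment. All names, the region, and the aggregation period are placeholder assumptions.

```ts
import { EvidentlyClient, GetExperimentResultsCommand } from "@aws-sdk/client-evidently";

// Sketch only: fetch experiment results for one metric and one treatment.
async function fetchExperimentResults(): Promise<void> {
  const client = new EvidentlyClient({ region: "us-west-2" });

  const results = await client.send(
    new GetExperimentResultsCommand({
      project: "my-project",              // placeholder project name
      experiment: "checkout-experiment",  // placeholder experiment name
      metricNames: ["purchaseRate"],
      treatmentNames: ["variation-A"],
      baseStat: "Mean",                   // currently the only valid value
      resultStats: ["PValue", "ConfidenceInterval"],
      reportNames: ["BayesianInference"],
      period: 3600,                       // aggregate results into 1-hour buckets (assumed valid)
    })
  );

  for (const entry of results.resultsData ?? []) {
    console.log(entry.metricName, entry.treatmentName, entry.resultStat, entry.values);
  }
}
```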

                                  The name or ARN of the project to return the experiment list from.

                                  + */ + project: string | undefined; + + /** + *

                                  The maximum number of results to include in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  The token to use when requesting the next set of results. You received this token from a previous + * ListExperiments operation.

                                  + */ + nextToken?: string; +} + +export namespace ListExperimentsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListExperimentsRequest): any => ({ + ...obj, + }); +} + +export interface ListExperimentsResponse { + /** + *

                                  An array of structures that contain the configuration details of the experiments in the + * specified project.

                                  + */ + experiments?: Experiment[]; + + /** + *

                                  The token to use in a subsequent ListExperiments operation to return + * the next set of results.

                                  + */ + nextToken?: string; +} + +export namespace ListExperimentsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListExperimentsResponse): any => ({ + ...obj, + }); +} + +export interface StartExperimentRequest { + /** + *
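The maxResults/nextToken pair above is the raw pagination interface; the paginateListExperiments helper added later in this patch wraps it. A minimal usage sketch follows, assuming the package re-exports the paginator from its index as the other generated clients do; the project name and the assumption that Experiment exposes a name field are illustrative.

```ts
import { EvidentlyClient, paginateListExperiments } from "@aws-sdk/client-evidently";

// Sketch only: walk every page of ListExperiments with the generated paginator.
async function listAllExperiments(): Promise<void> {
  const client = new EvidentlyClient({ region: "us-west-2" });

  const paginator = paginateListExperiments(
    { client, pageSize: 50 },      // pageSize is copied into maxResults on each request
    { project: "my-project" }      // placeholder project name
  );

  for await (const page of paginator) {
    for (const experiment of page.experiments ?? []) {
      console.log(experiment.name); // assumes Experiment exposes a name field
    }
  }
}
```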

                                  The name or ARN of the project that contains the experiment to start.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the experiment to start.

                                  + */ + experiment: string | undefined; + + /** + *

                                  The date and time to end the experiment.

                                  + */ + analysisCompleteTime: Date | undefined; +} + +export namespace StartExperimentRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartExperimentRequest): any => ({ + ...obj, + }); +} + +export interface StartExperimentResponse { + /** + *

                                  A timestamp that indicates when the experiment started.

                                  + */ + startedTime?: Date; +} + +export namespace StartExperimentResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartExperimentResponse): any => ({ + ...obj, + }); +} + +export enum ExperimentStopDesiredState { + CANCELLED = "CANCELLED", + COMPLETED = "COMPLETED", +} + +export interface StopExperimentRequest { + /** + *

                                  The name or ARN of the project that contains the experiment to stop.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the experiment to stop.

                                  + */ + experiment: string | undefined; + + /** + *

                                  Specify whether the experiment is to be considered COMPLETED or + * CANCELLED after it stops.

                                  + */ + desiredState?: ExperimentStopDesiredState | string; + + /** + *

                                  A string that describes why you are stopping the experiment.

                                  + */ + reason?: string; +} + +export namespace StopExperimentRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopExperimentRequest): any => ({ + ...obj, + }); +} + +export interface StopExperimentResponse { + /** + *

                                  The date and time that the experiment stopped.

                                  + */ + endedTime?: Date; +} + +export namespace StopExperimentResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopExperimentResponse): any => ({ + ...obj, + }); +} + +export interface UpdateExperimentRequest { + /** + *
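A minimal sketch (not part of the generated code) of stopping an experiment with the StopExperimentRequest shape above; the names and region are placeholders.

```ts
import { EvidentlyClient, StopExperimentCommand } from "@aws-sdk/client-evidently";

// Sketch only: stop an experiment and record it as COMPLETED.
async function stopExperiment(): Promise<void> {
  const client = new EvidentlyClient({ region: "us-west-2" });

  const { endedTime } = await client.send(
    new StopExperimentCommand({
      project: "my-project",             // placeholder
      experiment: "checkout-experiment", // placeholder
      desiredState: "COMPLETED",         // or "CANCELLED"
      reason: "Reached the planned sample size",
    })
  );

  console.log("Experiment stopped at", endedTime);
}
```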

                                  The name or ARN of the project that contains the experiment that you want to update.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the experiment to update.

                                  + */ + experiment: string | undefined; + + /** + *

                                  An optional description of the experiment.

                                  + */ + description?: string; + + /** + *

                                  An array of structures that define the variations being tested in the experiment.

                                  + */ + treatments?: TreatmentConfig[]; + + /** + *

                                  An array of structures that defines the metrics used for the experiment, and whether a higher + * or lower value for each metric is the goal.

                                  + */ + metricGoals?: MetricGoalConfig[]; + + /** + *

                                  When Evidently assigns a particular user session to an experiment, it must use a randomization ID + * to determine which variation the user session is served. This randomization ID is a combination of the entity ID + * and randomizationSalt. If you omit randomizationSalt, Evidently uses + * the experiment name as the randomizationSalt.

                                  + */ + randomizationSalt?: string; + + /** + *

                                  The portion of the available audience that you want to allocate to this experiment, in thousandths of a percent. The available audience + * is the total audience minus the audience that you have allocated to overrides or current launches of + * this feature.

                                  + *

                                  This is represented in thousandths of a percent. For example, specify 20,000 to allocate 20% of the available audience.

                                  + */ + samplingRate?: number; + + /** + *

                                  A structure that contains the configuration of which variation to use as the "control" + * version. The "control" version is used for comparison with other variations. This structure + * also specifies how much experiment traffic is allocated to each variation.

                                  + */ + onlineAbConfig?: OnlineAbConfig; +} + +export namespace UpdateExperimentRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateExperimentRequest): any => ({ + ...obj, + }); +} + +export interface UpdateExperimentResponse { + /** + *

                                  A structure containing the configuration details of the experiment + * that was updated.

                                  + */ + experiment: Experiment | undefined; +} + +export namespace UpdateExperimentResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateExperimentResponse): any => ({ + ...obj, + }); +} + +export interface GetFeatureRequest { + /** + *

                                  The name or ARN of the project that contains the feature.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the feature that you want to retrieve information for.

                                  + */ + feature: string | undefined; +} + +export namespace GetFeatureRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetFeatureRequest): any => ({ + ...obj, + }); +} + +export interface GetFeatureResponse { + /** + *

                                  A structure containing the configuration details of the feature.

                                  + */ + feature: Feature | undefined; +} + +export namespace GetFeatureResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetFeatureResponse): any => ({ + ...obj, + ...(obj.feature && { feature: Feature.filterSensitiveLog(obj.feature) }), + }); +} + +export interface ListFeaturesRequest { + /** + *

                                  The name or ARN of the project to return the feature list from.

                                  + */ + project: string | undefined; + + /** + *

                                  The maximum number of results to include in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  The token to use when requesting the next set of results. You received this token from a previous + * ListFeatures operation.

                                  + */ + nextToken?: string; +} + +export namespace ListFeaturesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListFeaturesRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  This structure contains information about one Evidently feature in your account.

                                  + */ +export interface FeatureSummary { + /** + *

                                  The ARN of the feature.

                                  + */ + arn: string | undefined; + + /** + *

                                  The name of the feature.

                                  + */ + name: string | undefined; + + /** + *

                                  The name or ARN of the project that contains the feature.

                                  + */ + project?: string; + + /** + *

                                  The current state of the feature.

                                  + */ + status: FeatureStatus | string | undefined; + + /** + *

                                  The date and time that the feature is created.

                                  + */ + createdTime: Date | undefined; + + /** + *

                                  The date and time that the feature was most recently updated.

                                  + */ + lastUpdatedTime: Date | undefined; + + /** + *

                                  If this value is ALL_RULES, the traffic allocation specified by + * any ongoing launches or experiments is being used. If this is DEFAULT_VARIATION, + * the default variation is being served to all users.

                                  + */ + evaluationStrategy: FeatureEvaluationStrategy | string | undefined; + + /** + *

                                  An array of structures that define the evaluation rules for the feature.

                                  + */ + evaluationRules?: EvaluationRule[]; + + /** + *

                                  The name of the variation that is used as the default variation. The default + * variation is served to users who are not allocated to any ongoing launches + * or experiments of this feature.

                                  + */ + defaultVariation?: string; + + /** + *

                                  The list of tag keys and values associated with this feature.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace FeatureSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FeatureSummary): any => ({ + ...obj, + }); +} + +export interface ListFeaturesResponse { + /** + *

                                  An array of structures that contain the configuration details of the features in the + * specified project.

                                  + */ + features?: FeatureSummary[]; + + /** + *

                                  The token to use in a subsequent ListFeatures operation to return + * the next set of results.

                                  + */ + nextToken?: string; +} + +export namespace ListFeaturesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListFeaturesResponse): any => ({ + ...obj, + }); +} + +export interface UpdateFeatureRequest { + /** + *

                                  The name or ARN of the project that contains the feature to be updated.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the feature to be updated.

                                  + */ + feature: string | undefined; + + /** + *

                                  Specify ALL_RULES to activate the traffic allocation specified by any ongoing + * launches or experiments. Specify DEFAULT_VARIATION to serve the default variation + * to all users instead.

                                  + */ + evaluationStrategy?: FeatureEvaluationStrategy | string; + + /** + *

                                  An optional description of the feature.

                                  + */ + description?: string; + + /** + *

                                  To update variation configurations for this feature, or add new ones, specify this structure. + * In this array, include any variations that you want to add or update. If the array includes a variation name that + * already exists for this feature, it is updated. If it includes a new variation name, it is added + * as a new variation.

                                  + */ + addOrUpdateVariations?: VariationConfig[]; + + /** + *

                                  Removes a variation from the feature. If the variation you specify doesn't exist, then this + * makes no change and does not report an error.

                                  + *

                                  This operation fails if you try to remove a variation that is part of an + * ongoing launch or experiment.

                                  + */ + removeVariations?: string[]; + + /** + *

                                  The name of the variation to use as the default variation. The default + * variation is served to users who are not allocated to any ongoing launches + * or experiments of this feature.

                                  + */ + defaultVariation?: string; + + /** + *

                                  Specified users that should always be served a specific variation of a feature. Each user + * is specified by a key-value pair. For each key, specify a user by entering their user ID, + * account ID, or some other identifier. For the value, specify the name of the variation that + * they are to be served.

                                  + */ + entityOverrides?: { [key: string]: string }; +} + +export namespace UpdateFeatureRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateFeatureRequest): any => ({ + ...obj, + ...(obj.addOrUpdateVariations && { + addOrUpdateVariations: obj.addOrUpdateVariations.map((item) => VariationConfig.filterSensitiveLog(item)), + }), + }); +} + +export interface UpdateFeatureResponse { + /** + *

                                  A structure that contains information about the updated feature.

                                  + */ + feature: Feature | undefined; +} + +export namespace UpdateFeatureResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateFeatureResponse): any => ({ + ...obj, + ...(obj.feature && { feature: Feature.filterSensitiveLog(obj.feature) }), + }); +} + +export interface GetProjectRequest { + /** + *
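To illustrate the UpdateFeatureRequest fields above, here is a hedged sketch that switches a feature to its default variation and pins one entity to a specific variation; the project, feature, variation names, and entity ID are placeholders.

```ts
import { EvidentlyClient, UpdateFeatureCommand } from "@aws-sdk/client-evidently";

// Sketch only: serve the default variation to everyone except one pinned test user.
async function pinTestUser(): Promise<void> {
  const client = new EvidentlyClient({ region: "us-west-2" });

  const { feature } = await client.send(
    new UpdateFeatureCommand({
      project: "my-project",                   // placeholder
      feature: "checkout-redesign",            // placeholder
      evaluationStrategy: "DEFAULT_VARIATION", // or "ALL_RULES"
      defaultVariation: "control",
      // Always serve the "new-flow" variation to this entity ID (placeholder values).
      entityOverrides: { "user-1234": "new-flow" },
    })
  );

  console.log("Updated feature:", feature);
}
```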

                                  The name or ARN of the project that you want to see the details of.

                                  + */ + project: string | undefined; +} + +export namespace GetProjectRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetProjectRequest): any => ({ + ...obj, + }); +} + +export interface GetProjectResponse { + /** + *

                                  A structure containing the configuration details of the project.

                                  + */ + project: Project | undefined; +} + +export namespace GetProjectResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetProjectResponse): any => ({ + ...obj, + }); +} + +export interface GetLaunchRequest { + /** + *

                                  The name or ARN of the project that contains the launch.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the launch that you want to see the details of.

                                  + */ + launch: string | undefined; +} + +export namespace GetLaunchRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetLaunchRequest): any => ({ + ...obj, + }); +} + +export interface GetLaunchResponse { + /** + *

                                  A structure containing the configuration details of the launch.

                                  + */ + launch?: Launch; +} + +export namespace GetLaunchResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetLaunchResponse): any => ({ + ...obj, + }); +} + +export interface ListLaunchesRequest { + /** + *

                                  The name or ARN of the project to return the launch list from.

                                  + */ + project: string | undefined; + + /** + *

                                  The maximum number of results to include in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  The token to use when requesting the next set of results. You received this token from a previous + * ListLaunches operation.

                                  + */ + nextToken?: string; +} + +export namespace ListLaunchesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListLaunchesRequest): any => ({ + ...obj, + }); +} + +export interface ListLaunchesResponse { + /** + *

                                  An array of structures that contain the configuration details of the launches in the + * specified project.

                                  + */ + launches?: Launch[]; + + /** + *

                                  The token to use in a subsequent ListLaunches operation to return + * the next set of results.

                                  + */ + nextToken?: string; +} + +export namespace ListLaunchesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListLaunchesResponse): any => ({ + ...obj, + }); +} + +export interface StartLaunchRequest { + /** + *

                                  The name or ARN of the project that contains the launch to start.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the launch to start.

                                  + */ + launch: string | undefined; +} + +export namespace StartLaunchRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartLaunchRequest): any => ({ + ...obj, + }); +} + +export interface StartLaunchResponse { + /** + *

                                  A structure that contains information about the launch that was started.

                                  + */ + launch: Launch | undefined; +} + +export namespace StartLaunchResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StartLaunchResponse): any => ({ + ...obj, + }); +} + +export enum LaunchStopDesiredState { + CANCELLED = "CANCELLED", + COMPLETED = "COMPLETED", +} + +export interface StopLaunchRequest { + /** + *

                                  The name or ARN of the project that contains the launch that you want to stop.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the launch to stop.

                                  + */ + launch: string | undefined; + + /** + *

                                  Specify whether to consider the launch as COMPLETED or + * CANCELLED after it stops.

                                  + */ + desiredState?: LaunchStopDesiredState | string; + + /** + *

                                  A string that describes why you are stopping the launch.

                                  + */ + reason?: string; +} + +export namespace StopLaunchRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopLaunchRequest): any => ({ + ...obj, + }); +} + +export interface StopLaunchResponse { + /** + *

                                  The date and time that the launch stopped.

                                  + */ + endedTime?: Date; +} + +export namespace StopLaunchResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StopLaunchResponse): any => ({ + ...obj, + }); +} + +export interface UpdateLaunchRequest { + /** + *

                                  The name or ARN of the project that contains the launch that you want to update.

                                  + */ + project: string | undefined; + + /** + *

                                  The name of the launch that is to be updated.

                                  + */ + launch: string | undefined; + + /** + *

                                  An optional description for the launch.

                                  + */ + description?: string; + + /** + *

                                  An array of structures that contains the feature and variations that are to be used for + * the launch.

                                  + */ + groups?: LaunchGroupConfig[]; + + /** + *

                                  An array of structures that define the metrics that will be used to monitor + * the launch performance.

                                  + */ + metricMonitors?: MetricMonitorConfig[]; + + /** + *

                                  When Evidently assigns a particular user session to a launch, it must use a randomization ID + * to determine which variation the user session is served. This randomization ID is a combination of the entity ID + * and randomizationSalt. If you omit randomizationSalt, Evidently uses + * the launch name as the randomizationSalt.

                                  + */ + randomizationSalt?: string; + + /** + *

                                  An array of structures that define the traffic allocation percentages among the feature + * variations during each step of the launch.

                                  + */ + scheduledSplitsConfig?: ScheduledSplitsLaunchConfig; +} + +export namespace UpdateLaunchRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateLaunchRequest): any => ({ + ...obj, + }); +} + +export interface UpdateLaunchResponse { + /** + *

                                  A structure that contains the new configuration of the launch that was updated.

                                  + */ + launch: Launch | undefined; +} + +export namespace UpdateLaunchResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateLaunchResponse): any => ({ + ...obj, + }); +} + +export interface ListProjectsRequest { + /** + *

                                  The maximum number of results to include in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  The token to use when requesting the next set of results. You received this token from a previous + * ListProjects operation.

                                  + */ + nextToken?: string; +} + +export namespace ListProjectsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListProjectsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains configuration information about an Evidently project.

                                  + */ +export interface ProjectSummary { + /** + *

                                  The name or ARN of the project.

                                  + */ + arn: string | undefined; + + /** + *

                                  The name of the project.

                                  + */ + name: string | undefined; + + /** + *

                                  The current state of the project.

                                  + */ + status: ProjectStatus | string | undefined; + + /** + *

                                  The description of the project.

                                  + */ + description?: string; + + /** + *

                                  The date and time that the project is created.

                                  + */ + createdTime: Date | undefined; + + /** + *

                                  The date and time that the project was most recently updated.

                                  + */ + lastUpdatedTime: Date | undefined; + + /** + *

                                  The number of features currently in the project.

                                  + */ + featureCount?: number; + + /** + *

                                  The number of launches currently in the project, including launches that are ongoing, completed, and not started yet.

                                  + */ + launchCount?: number; + + /** + *

                                  The number of ongoing launches currently in the project.

                                  + */ + activeLaunchCount?: number; + + /** + *

                                  The number of experiments currently in the project.

                                  + */ + experimentCount?: number; + + /** + *

                                  The number of ongoing experiments currently in the project.

                                  + */ + activeExperimentCount?: number; + + /** + *

                                  The list of tag keys and values associated with this project.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace ProjectSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ProjectSummary): any => ({ + ...obj, + }); +} + +export interface ListProjectsResponse { + /** + *

                                  An array of structures that contain the configuration details of the projects in the Region.

                                  + */ + projects?: ProjectSummary[]; + + /** + *

                                  The token to use in a subsequent ListProjects operation to return + * the next set of results.

                                  + */ + nextToken?: string; +} + +export namespace ListProjectsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListProjectsResponse): any => ({ + ...obj, + }); +} + +export interface PutProjectEventsRequest { + /** + *

                                  The name or ARN of the project to write the events to.

                                  + */ + project: string | undefined; + + /** + *

                                  An array of event structures that contain the performance data that is being sent to + * Evidently.

                                  + */ + events: Event[] | undefined; +} + +export namespace PutProjectEventsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutProjectEventsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains Evidently's response to the sent events, including an event ID and error codes, if any.

                                  + */ +export interface PutProjectEventsResultEntry { + /** + *

                                  A unique ID assigned to this PutProjectEvents operation.

                                  + */ + eventId?: string; + + /** + *

                                  If the PutProjectEvents operation has an error, the error code is returned + * here.

                                  + */ + errorCode?: string; + + /** + *

                                  If the PutProjectEvents operation has an error, the error message is + * returned here.

                                  + */ + errorMessage?: string; +} + +export namespace PutProjectEventsResultEntry { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutProjectEventsResultEntry): any => ({ + ...obj, + }); +} + +export interface PutProjectEventsResponse { + /** + *

                                  The number of events in the operation that could not be used by Evidently.

                                  + */ + failedEventCount?: number; + + /** + *

                                  A structure that contains Evidently's response to the sent events, including an event ID and + * error codes, if any.

                                  + */ + eventResults?: PutProjectEventsResultEntry[]; +} + +export namespace PutProjectEventsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PutProjectEventsResponse): any => ({ + ...obj, + }); +} + +export interface UpdateProjectRequest { + /** + *
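As a usage illustration of PutProjectEventsRequest/Response above, the sketch below (not part of the generated code) sends one custom event and checks for per-event errors; the project name and the event payload shape are illustrative placeholders, since the payload must follow the schema expected for the chosen event type.

```ts
import { EvidentlyClient, PutProjectEventsCommand } from "@aws-sdk/client-evidently";

// Sketch only: send a custom event and report any per-event failures.
async function sendCustomEvent(): Promise<void> {
  const client = new EvidentlyClient({ region: "us-west-2" });

  const response = await client.send(
    new PutProjectEventsCommand({
      project: "my-project",            // placeholder
      events: [
        {
          timestamp: new Date(),
          type: "aws.evidently.custom", // EventType.CUSTOM
          // The payload shape here is illustrative only.
          data: JSON.stringify({ userId: "user-1234", checkoutValue: 42.5 }),
        },
      ],
    })
  );

  if (response.failedEventCount) {
    for (const result of response.eventResults ?? []) {
      console.error(result.eventId, result.errorCode, result.errorMessage);
    }
  }
}
```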

                                  The name or ARN of the project to update.

                                  + */ + project: string | undefined; + + /** + *

                                  An optional description of the project.

                                  + */ + description?: string; +} + +export namespace UpdateProjectRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateProjectRequest): any => ({ + ...obj, + }); +} + +export interface UpdateProjectResponse { + /** + *

                                  A structure containing information about the updated project.

                                  + */ + project: Project | undefined; +} + +export namespace UpdateProjectResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateProjectResponse): any => ({ + ...obj, + }); +} + +export interface UpdateProjectDataDeliveryRequest { + /** + *

                                  The name or ARN of the project that you want to modify the data storage options for.

                                  + */ + project: string | undefined; + + /** + *

                                  A structure containing the S3 bucket name and bucket prefix where you want to store evaluation events.

                                  + */ + s3Destination?: S3DestinationConfig; + + /** + *

                                  A structure containing the CloudWatch Logs log group where you want to store evaluation + * events.

                                  + */ + cloudWatchLogs?: CloudWatchLogsDestinationConfig; +} + +export namespace UpdateProjectDataDeliveryRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateProjectDataDeliveryRequest): any => ({ + ...obj, + }); +} + +export interface UpdateProjectDataDeliveryResponse { + /** + *

                                  A structure containing details about the project that you updated.

                                  + */ + project: Project | undefined; +} + +export namespace UpdateProjectDataDeliveryResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateProjectDataDeliveryResponse): any => ({ + ...obj, + }); +} + +export interface TagResourceRequest { + /** + *

                                  The ARN of the CloudWatch Evidently resource that you're adding tags to.

                                  + */ + resourceArn: string | undefined; + + /** + *

                                  The list of key-value pairs to associate with the resource.

                                  + */ + tags: { [key: string]: string } | undefined; +} + +export namespace TagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ + ...obj, + }); +} + +export interface TagResourceResponse {} + +export namespace TagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UntagResourceRequest { + /** + *
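Finally, a short sketch of TagResourceRequest in use; the ARN format and tag values are illustrative assumptions, not taken from this patch.

```ts
import { EvidentlyClient, TagResourceCommand } from "@aws-sdk/client-evidently";

// Sketch only: add tags to an Evidently resource. The ARN shown is a made-up example.
async function tagProject(): Promise<void> {
  const client = new EvidentlyClient({ region: "us-west-2" });

  await client.send(
    new TagResourceCommand({
      resourceArn: "arn:aws:evidently:us-west-2:123456789012:project/my-project",
      tags: { team: "growth", stage: "prod" },
    })
  );
}
```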

                                  The ARN of the CloudWatch Evidently resource that you're removing tags from.

                                  + */ + resourceArn: string | undefined; + + /** + *

                                  The list of tag keys to remove from the resource.

                                  + */ + tagKeys: string[] | undefined; +} + +export namespace UntagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ + ...obj, + }); +} + +export interface UntagResourceResponse {} + +export namespace UntagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-evidently/src/pagination/Interfaces.ts b/clients/client-evidently/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..edda5fa1494c --- /dev/null +++ b/clients/client-evidently/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { Evidently } from "../Evidently"; +import { EvidentlyClient } from "../EvidentlyClient"; + +export interface EvidentlyPaginationConfiguration extends PaginationConfiguration { + client: Evidently | EvidentlyClient; +} diff --git a/clients/client-evidently/src/pagination/ListExperimentsPaginator.ts b/clients/client-evidently/src/pagination/ListExperimentsPaginator.ts new file mode 100644 index 000000000000..fa7aab535e33 --- /dev/null +++ b/clients/client-evidently/src/pagination/ListExperimentsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListExperimentsCommand, + ListExperimentsCommandInput, + ListExperimentsCommandOutput, +} from "../commands/ListExperimentsCommand"; +import { Evidently } from "../Evidently"; +import { EvidentlyClient } from "../EvidentlyClient"; +import { EvidentlyPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: EvidentlyClient, + input: ListExperimentsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListExperimentsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Evidently, + input: ListExperimentsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listExperiments(input, ...args); +}; +export async function* paginateListExperiments( + config: EvidentlyPaginationConfiguration, + input: ListExperimentsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListExperimentsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Evidently) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof EvidentlyClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Evidently | EvidentlyClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-evidently/src/pagination/ListFeaturesPaginator.ts b/clients/client-evidently/src/pagination/ListFeaturesPaginator.ts new file mode 100644 index 000000000000..f9301587b769 --- /dev/null +++ b/clients/client-evidently/src/pagination/ListFeaturesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListFeaturesCommand, + ListFeaturesCommandInput, + ListFeaturesCommandOutput, +} from 
"../commands/ListFeaturesCommand"; +import { Evidently } from "../Evidently"; +import { EvidentlyClient } from "../EvidentlyClient"; +import { EvidentlyPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: EvidentlyClient, + input: ListFeaturesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListFeaturesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Evidently, + input: ListFeaturesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listFeatures(input, ...args); +}; +export async function* paginateListFeatures( + config: EvidentlyPaginationConfiguration, + input: ListFeaturesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListFeaturesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Evidently) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof EvidentlyClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Evidently | EvidentlyClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-evidently/src/pagination/ListLaunchesPaginator.ts b/clients/client-evidently/src/pagination/ListLaunchesPaginator.ts new file mode 100644 index 000000000000..34345933f97f --- /dev/null +++ b/clients/client-evidently/src/pagination/ListLaunchesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListLaunchesCommand, + ListLaunchesCommandInput, + ListLaunchesCommandOutput, +} from "../commands/ListLaunchesCommand"; +import { Evidently } from "../Evidently"; +import { EvidentlyClient } from "../EvidentlyClient"; +import { EvidentlyPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: EvidentlyClient, + input: ListLaunchesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListLaunchesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Evidently, + input: ListLaunchesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listLaunches(input, ...args); +}; +export async function* paginateListLaunches( + config: EvidentlyPaginationConfiguration, + input: ListLaunchesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListLaunchesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Evidently) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof EvidentlyClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Evidently | EvidentlyClient"); + } + yield page; + 
token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-evidently/src/pagination/ListProjectsPaginator.ts b/clients/client-evidently/src/pagination/ListProjectsPaginator.ts new file mode 100644 index 000000000000..86ed5cdc3616 --- /dev/null +++ b/clients/client-evidently/src/pagination/ListProjectsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListProjectsCommand, + ListProjectsCommandInput, + ListProjectsCommandOutput, +} from "../commands/ListProjectsCommand"; +import { Evidently } from "../Evidently"; +import { EvidentlyClient } from "../EvidentlyClient"; +import { EvidentlyPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: EvidentlyClient, + input: ListProjectsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListProjectsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Evidently, + input: ListProjectsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listProjects(input, ...args); +}; +export async function* paginateListProjects( + config: EvidentlyPaginationConfiguration, + input: ListProjectsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListProjectsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Evidently) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof EvidentlyClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Evidently | EvidentlyClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-evidently/src/pagination/index.ts b/clients/client-evidently/src/pagination/index.ts new file mode 100644 index 000000000000..84dc18d8a97f --- /dev/null +++ b/clients/client-evidently/src/pagination/index.ts @@ -0,0 +1,5 @@ +export * from "./Interfaces"; +export * from "./ListExperimentsPaginator"; +export * from "./ListFeaturesPaginator"; +export * from "./ListLaunchesPaginator"; +export * from "./ListProjectsPaginator"; diff --git a/clients/client-evidently/src/protocols/Aws_restJson1.ts b/clients/client-evidently/src/protocols/Aws_restJson1.ts new file mode 100644 index 000000000000..2e5bbd93341d --- /dev/null +++ b/clients/client-evidently/src/protocols/Aws_restJson1.ts @@ -0,0 +1,5344 @@ +import { + HttpRequest as __HttpRequest, + HttpResponse as __HttpResponse, + isValidHostname as __isValidHostname, +} from "@aws-sdk/protocol-http"; +import { + expectBoolean as __expectBoolean, + expectInt32 as __expectInt32, + expectLong as __expectLong, + expectNonNull as __expectNonNull, + expectNumber as __expectNumber, + expectObject as __expectObject, + expectString as __expectString, + expectUnion as __expectUnion, + extendedEncodeURIComponent as __extendedEncodeURIComponent, + LazyJsonString as __LazyJsonString, + limitedParseDouble as __limitedParseDouble, + parseEpochTimestamp as __parseEpochTimestamp, + serializeFloat as __serializeFloat, +} from 
"@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + MetadataBearer as __MetadataBearer, + ResponseMetadata as __ResponseMetadata, + SerdeContext as __SerdeContext, + SmithyException as __SmithyException, +} from "@aws-sdk/types"; + +import { + BatchEvaluateFeatureCommandInput, + BatchEvaluateFeatureCommandOutput, +} from "../commands/BatchEvaluateFeatureCommand"; +import { CreateExperimentCommandInput, CreateExperimentCommandOutput } from "../commands/CreateExperimentCommand"; +import { CreateFeatureCommandInput, CreateFeatureCommandOutput } from "../commands/CreateFeatureCommand"; +import { CreateLaunchCommandInput, CreateLaunchCommandOutput } from "../commands/CreateLaunchCommand"; +import { CreateProjectCommandInput, CreateProjectCommandOutput } from "../commands/CreateProjectCommand"; +import { DeleteExperimentCommandInput, DeleteExperimentCommandOutput } from "../commands/DeleteExperimentCommand"; +import { DeleteFeatureCommandInput, DeleteFeatureCommandOutput } from "../commands/DeleteFeatureCommand"; +import { DeleteLaunchCommandInput, DeleteLaunchCommandOutput } from "../commands/DeleteLaunchCommand"; +import { DeleteProjectCommandInput, DeleteProjectCommandOutput } from "../commands/DeleteProjectCommand"; +import { EvaluateFeatureCommandInput, EvaluateFeatureCommandOutput } from "../commands/EvaluateFeatureCommand"; +import { GetExperimentCommandInput, GetExperimentCommandOutput } from "../commands/GetExperimentCommand"; +import { + GetExperimentResultsCommandInput, + GetExperimentResultsCommandOutput, +} from "../commands/GetExperimentResultsCommand"; +import { GetFeatureCommandInput, GetFeatureCommandOutput } from "../commands/GetFeatureCommand"; +import { GetLaunchCommandInput, GetLaunchCommandOutput } from "../commands/GetLaunchCommand"; +import { GetProjectCommandInput, GetProjectCommandOutput } from "../commands/GetProjectCommand"; +import { ListExperimentsCommandInput, ListExperimentsCommandOutput } from "../commands/ListExperimentsCommand"; +import { ListFeaturesCommandInput, ListFeaturesCommandOutput } from "../commands/ListFeaturesCommand"; +import { ListLaunchesCommandInput, ListLaunchesCommandOutput } from "../commands/ListLaunchesCommand"; +import { ListProjectsCommandInput, ListProjectsCommandOutput } from "../commands/ListProjectsCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { PutProjectEventsCommandInput, PutProjectEventsCommandOutput } from "../commands/PutProjectEventsCommand"; +import { StartExperimentCommandInput, StartExperimentCommandOutput } from "../commands/StartExperimentCommand"; +import { StartLaunchCommandInput, StartLaunchCommandOutput } from "../commands/StartLaunchCommand"; +import { StopExperimentCommandInput, StopExperimentCommandOutput } from "../commands/StopExperimentCommand"; +import { StopLaunchCommandInput, StopLaunchCommandOutput } from "../commands/StopLaunchCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { UpdateExperimentCommandInput, UpdateExperimentCommandOutput } from "../commands/UpdateExperimentCommand"; +import { UpdateFeatureCommandInput, UpdateFeatureCommandOutput } from "../commands/UpdateFeatureCommand"; +import { UpdateLaunchCommandInput, UpdateLaunchCommandOutput } from "../commands/UpdateLaunchCommand"; +import { 
UpdateProjectCommandInput, UpdateProjectCommandOutput } from "../commands/UpdateProjectCommand"; +import { + UpdateProjectDataDeliveryCommandInput, + UpdateProjectDataDeliveryCommandOutput, +} from "../commands/UpdateProjectDataDeliveryCommand"; +import { + AccessDeniedException, + CloudWatchLogsDestination, + CloudWatchLogsDestinationConfig, + ConflictException, + EvaluationRequest, + EvaluationResult, + EvaluationRule, + Event, + Experiment, + ExperimentExecution, + ExperimentReport, + ExperimentReportName, + ExperimentResultRequestType, + ExperimentResultsData, + ExperimentSchedule, + Feature, + FeatureSummary, + InternalServerException, + Launch, + LaunchExecution, + LaunchGroup, + LaunchGroupConfig, + MetricDefinition, + MetricDefinitionConfig, + MetricGoal, + MetricGoalConfig, + MetricMonitor, + MetricMonitorConfig, + OnlineAbConfig, + OnlineAbDefinition, + Project, + ProjectDataDelivery, + ProjectDataDeliveryConfig, + ProjectSummary, + PutProjectEventsResultEntry, + ResourceNotFoundException, + S3Destination, + S3DestinationConfig, + ScheduledSplit, + ScheduledSplitConfig, + ScheduledSplitsLaunchConfig, + ScheduledSplitsLaunchDefinition, + ServiceQuotaExceededException, + ServiceUnavailableException, + ThrottlingException, + Treatment, + TreatmentConfig, + ValidationException, + ValidationExceptionField, + VariableValue, + Variation, + VariationConfig, +} from "../models/models_0"; + +export const serializeAws_restJson1BatchEvaluateFeatureCommand = async ( + input: BatchEvaluateFeatureCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/evaluations"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + body = JSON.stringify({ + ...(input.requests !== undefined && + input.requests !== null && { requests: serializeAws_restJson1EvaluationRequestsList(input.requests, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "dataplane." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateExperimentCommand = async ( + input: CreateExperimentCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
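// Usage sketch (hand-written illustration, not generated code): a hedged call to the
// BatchEvaluateFeature operation whose serializer appears above. The body is built from
// `requests`, and the serializer prepends the "dataplane." host prefix unless
// disableHostPrefix is set. The EvaluationRequest member names used here (feature, entityId)
// are assumptions; see models_0.ts for the authoritative shape.
import { BatchEvaluateFeatureCommand, EvidentlyClient } from "@aws-sdk/client-evidently";

async function evaluateForUsers(project: string, feature: string, userIds: string[]) {
  const client = new EvidentlyClient({ region: "us-west-2" }); // placeholder region
  const response = await client.send(
    new BatchEvaluateFeatureCommand({
      project, // becomes the {project} path label in POST /projects/{project}/evaluations
      requests: userIds.map((entityId) => ({ feature, entityId })), // assumed member names
    })
  );
  // The deserializer surfaces the per-request evaluations under `results`.
  return response.results ?? [];
}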
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/experiments"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.metricGoals !== undefined && + input.metricGoals !== null && { + metricGoals: serializeAws_restJson1MetricGoalConfigList(input.metricGoals, context), + }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.onlineAbConfig !== undefined && + input.onlineAbConfig !== null && { + onlineAbConfig: serializeAws_restJson1OnlineAbConfig(input.onlineAbConfig, context), + }), + ...(input.randomizationSalt !== undefined && + input.randomizationSalt !== null && { randomizationSalt: input.randomizationSalt }), + ...(input.samplingRate !== undefined && input.samplingRate !== null && { samplingRate: input.samplingRate }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + ...(input.treatments !== undefined && + input.treatments !== null && { + treatments: serializeAws_restJson1TreatmentConfigList(input.treatments, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateFeatureCommand = async ( + input: CreateFeatureCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/features"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + body = JSON.stringify({ + ...(input.defaultVariation !== undefined && + input.defaultVariation !== null && { defaultVariation: input.defaultVariation }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.entityOverrides !== undefined && + input.entityOverrides !== null && { + entityOverrides: serializeAws_restJson1EntityOverrideMap(input.entityOverrides, context), + }), + ...(input.evaluationStrategy !== undefined && + input.evaluationStrategy !== null && { evaluationStrategy: input.evaluationStrategy }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + ...(input.variations !== undefined && + input.variations !== null && { + variations: serializeAws_restJson1VariationConfigsList(input.variations, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateLaunchCommand = async ( + input: CreateLaunchCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/launches"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.groups !== undefined && + input.groups !== null && { groups: serializeAws_restJson1LaunchGroupConfigList(input.groups, context) }), + ...(input.metricMonitors !== undefined && + input.metricMonitors !== null && { + metricMonitors: serializeAws_restJson1MetricMonitorConfigList(input.metricMonitors, context), + }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.randomizationSalt !== undefined && + input.randomizationSalt !== null && { randomizationSalt: input.randomizationSalt }), + ...(input.scheduledSplitsConfig !== undefined && + input.scheduledSplitsConfig !== null && { + scheduledSplitsConfig: serializeAws_restJson1ScheduledSplitsLaunchConfig(input.scheduledSplitsConfig, context), + }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateProjectCommand = async ( + input: CreateProjectCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects"; + let body: any; + body = JSON.stringify({ + ...(input.dataDelivery !== undefined && + input.dataDelivery !== null && { + dataDelivery: serializeAws_restJson1ProjectDataDeliveryConfig(input.dataDelivery, context), + }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteExperimentCommand = async ( + input: DeleteExperimentCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
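// Usage sketch (hand-written illustration, not generated code): a minimal CreateProject call
// matching the serializer above (POST /projects with name, description, and optional tags or
// dataDelivery). The region, project name, and tag values are placeholders.
import { CreateProjectCommand, EvidentlyClient } from "@aws-sdk/client-evidently";

async function createDemoProject() {
  const client = new EvidentlyClient({ region: "us-west-2" }); // placeholder region
  const { project } = await client.send(
    new CreateProjectCommand({
      name: "demo-project",
      description: "Project created from the SDK usage sketch",
      tags: { team: "experimentation" },
    })
  );
  return project; // Project structure deserialized from the response body
}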
basePath.slice(0, -1) : basePath || ""}` + + "/projects/{project}/experiments/{experiment}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.experiment !== undefined) { + const labelValue: string = input.experiment; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: experiment."); + } + resolvedPath = resolvedPath.replace("{experiment}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: experiment."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteFeatureCommand = async ( + input: DeleteFeatureCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/features/{feature}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.feature !== undefined) { + const labelValue: string = input.feature; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: feature."); + } + resolvedPath = resolvedPath.replace("{feature}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: feature."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteLaunchCommand = async ( + input: DeleteLaunchCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/launches/{launch}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.launch !== undefined) { + const labelValue: string = input.launch; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: launch."); + } + resolvedPath = resolvedPath.replace("{launch}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: launch."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteProjectCommand = async ( + input: DeleteProjectCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1EvaluateFeatureCommand = async ( + input: EvaluateFeatureCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/evaluations/{feature}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.feature !== undefined) { + const labelValue: string = input.feature; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: feature."); + } + resolvedPath = resolvedPath.replace("{feature}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: feature."); + } + let body: any; + body = JSON.stringify({ + ...(input.entityId !== undefined && input.entityId !== null && { entityId: input.entityId }), + ...(input.evaluationContext !== undefined && + input.evaluationContext !== null && { evaluationContext: __LazyJsonString.fromObject(input.evaluationContext) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "dataplane." 
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetExperimentCommand = async ( + input: GetExperimentCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/projects/{project}/experiments/{experiment}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.experiment !== undefined) { + const labelValue: string = input.experiment; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: experiment."); + } + resolvedPath = resolvedPath.replace("{experiment}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: experiment."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetExperimentResultsCommand = async ( + input: GetExperimentResultsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
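// Usage sketch (hand-written illustration, not generated code): a single EvaluateFeature call
// matching the serializer above. project and feature are path labels, entityId and an optional
// JSON evaluationContext travel in the body, and the request goes to the "dataplane."-prefixed
// endpoint. The `variation` output member is an assumption; consult models_0.ts.
import { EvaluateFeatureCommand, EvidentlyClient } from "@aws-sdk/client-evidently";

async function whichVariation(project: string, feature: string, userId: string) {
  const client = new EvidentlyClient({ region: "us-west-2" }); // placeholder region
  const result = await client.send(
    new EvaluateFeatureCommand({
      project,
      feature,
      entityId: userId,
      // evaluationContext is serialized as a JSON document (LazyJsonString) in the body.
      evaluationContext: JSON.stringify({ plan: "premium" }), // placeholder context
    })
  );
  return result.variation; // assumed output member
}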
basePath.slice(0, -1) : basePath || ""}` + + "/projects/{project}/experiments/{experiment}/results"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.experiment !== undefined) { + const labelValue: string = input.experiment; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: experiment."); + } + resolvedPath = resolvedPath.replace("{experiment}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: experiment."); + } + let body: any; + body = JSON.stringify({ + ...(input.baseStat !== undefined && input.baseStat !== null && { baseStat: input.baseStat }), + ...(input.endTime !== undefined && + input.endTime !== null && { endTime: Math.round(input.endTime.getTime() / 1000) }), + ...(input.metricNames !== undefined && + input.metricNames !== null && { metricNames: serializeAws_restJson1MetricNameList(input.metricNames, context) }), + ...(input.period !== undefined && input.period !== null && { period: input.period }), + ...(input.reportNames !== undefined && + input.reportNames !== null && { + reportNames: serializeAws_restJson1ExperimentReportNameList(input.reportNames, context), + }), + ...(input.resultStats !== undefined && + input.resultStats !== null && { + resultStats: serializeAws_restJson1ExperimentResultRequestTypeList(input.resultStats, context), + }), + ...(input.startTime !== undefined && + input.startTime !== null && { startTime: Math.round(input.startTime.getTime() / 1000) }), + ...(input.treatmentNames !== undefined && + input.treatmentNames !== null && { + treatmentNames: serializeAws_restJson1TreatmentNameList(input.treatmentNames, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetFeatureCommand = async ( + input: GetFeatureCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
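// Usage sketch (hand-written illustration, not generated code): GetExperimentResults as
// serialized above. startTime/endTime are Date objects that the serializer converts to epoch
// seconds; metricNames and treatmentNames mirror the body members shown. The metric and
// treatment names below are placeholders.
import { EvidentlyClient, GetExperimentResultsCommand } from "@aws-sdk/client-evidently";

async function fetchResults(project: string, experiment: string) {
  const client = new EvidentlyClient({ region: "us-west-2" }); // placeholder region
  return client.send(
    new GetExperimentResultsCommand({
      project,
      experiment,
      metricNames: ["pageLoadTime"], // placeholder metric
      treatmentNames: ["control", "variation-a"], // placeholder treatments
      startTime: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000), // last 7 days
      endTime: new Date(),
    })
  );
}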
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/features/{feature}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.feature !== undefined) { + const labelValue: string = input.feature; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: feature."); + } + resolvedPath = resolvedPath.replace("{feature}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: feature."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetLaunchCommand = async ( + input: GetLaunchCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/launches/{launch}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.launch !== undefined) { + const labelValue: string = input.launch; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: launch."); + } + resolvedPath = resolvedPath.replace("{launch}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: launch."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetProjectCommand = async ( + input: GetProjectCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListExperimentsCommand = async ( + input: ListExperimentsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/experiments"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + const query: any = { + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListFeaturesCommand = async ( + input: ListFeaturesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/features"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + const query: any = { + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListLaunchesCommand = async ( + input: ListLaunchesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/launches"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + const query: any = { + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListProjectsCommand = async ( + input: ListProjectsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/projects"; + const query: any = { + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListTagsForResourceCommand = async ( + input: ListTagsForResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace("{resourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1PutProjectEventsCommand = async ( + input: PutProjectEventsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/events/projects/{project}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + body = JSON.stringify({ + ...(input.events !== undefined && + input.events !== null && { events: serializeAws_restJson1EventList(input.events, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "dataplane." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1StartExperimentCommand = async ( + input: StartExperimentCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
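// Usage sketch (hand-written illustration, not generated code): the list serializers above all
// map maxResults and nextToken to query-string parameters. This walks ListExperiments pages by
// hand with that token; the generated ListExperimentsPaginator performs the same loop. The
// `experiments` output member name is an assumption.
import { EvidentlyClient, ListExperimentsCommand } from "@aws-sdk/client-evidently";

async function countExperiments(project: string): Promise<number> {
  const client = new EvidentlyClient({ region: "us-west-2" }); // placeholder region
  let nextToken: string | undefined;
  let total = 0;
  do {
    const page = await client.send(new ListExperimentsCommand({ project, maxResults: 25, nextToken }));
    total += page.experiments?.length ?? 0; // assumed output member
    nextToken = page.nextToken;
  } while (nextToken);
  return total;
}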
basePath.slice(0, -1) : basePath || ""}` + + "/projects/{project}/experiments/{experiment}/start"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.experiment !== undefined) { + const labelValue: string = input.experiment; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: experiment."); + } + resolvedPath = resolvedPath.replace("{experiment}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: experiment."); + } + let body: any; + body = JSON.stringify({ + ...(input.analysisCompleteTime !== undefined && + input.analysisCompleteTime !== null && { + analysisCompleteTime: Math.round(input.analysisCompleteTime.getTime() / 1000), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1StartLaunchCommand = async ( + input: StartLaunchCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/projects/{project}/launches/{launch}/start"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.launch !== undefined) { + const labelValue: string = input.launch; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: launch."); + } + resolvedPath = resolvedPath.replace("{launch}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: launch."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1StopExperimentCommand = async ( + input: StopExperimentCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + + "/projects/{project}/experiments/{experiment}/cancel"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.experiment !== undefined) { + const labelValue: string = input.experiment; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: experiment."); + } + resolvedPath = resolvedPath.replace("{experiment}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: experiment."); + } + let body: any; + body = JSON.stringify({ + ...(input.desiredState !== undefined && input.desiredState !== null && { desiredState: input.desiredState }), + ...(input.reason !== undefined && input.reason !== null && { reason: input.reason }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1StopLaunchCommand = async ( + input: StopLaunchCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/projects/{project}/launches/{launch}/cancel"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.launch !== undefined) { + const labelValue: string = input.launch; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: launch."); + } + resolvedPath = resolvedPath.replace("{launch}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: launch."); + } + let body: any; + body = JSON.stringify({ + ...(input.desiredState !== undefined && input.desiredState !== null && { desiredState: input.desiredState }), + ...(input.reason !== undefined && input.reason !== null && { reason: input.reason }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1TagResourceCommand = async ( + input: TagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace("{resourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + let body: any; + body = JSON.stringify({ + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UntagResourceCommand = async ( + input: UntagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace("{resourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + const query: any = { + ...(input.tagKeys !== undefined && { tagKeys: (input.tagKeys || []).map((_entry) => _entry as any) }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1UpdateExperimentCommand = async ( + input: UpdateExperimentCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
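// Usage sketch (hand-written illustration, not generated code): tagging a resource with the
// TagResource/UntagResource serializers above. Tags travel in the request body, while tagKeys
// are sent as a query parameter on the DELETE call. The ARN and tag values are placeholders.
import { EvidentlyClient, TagResourceCommand, UntagResourceCommand } from "@aws-sdk/client-evidently";

async function retagResource(resourceArn: string) {
  const client = new EvidentlyClient({ region: "us-west-2" }); // placeholder region
  await client.send(new TagResourceCommand({ resourceArn, tags: { stage: "beta" } }));
  // Later, remove the same key again.
  await client.send(new UntagResourceCommand({ resourceArn, tagKeys: ["stage"] }));
}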
basePath.slice(0, -1) : basePath || ""}` + + "/projects/{project}/experiments/{experiment}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.experiment !== undefined) { + const labelValue: string = input.experiment; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: experiment."); + } + resolvedPath = resolvedPath.replace("{experiment}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: experiment."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.metricGoals !== undefined && + input.metricGoals !== null && { + metricGoals: serializeAws_restJson1MetricGoalConfigList(input.metricGoals, context), + }), + ...(input.onlineAbConfig !== undefined && + input.onlineAbConfig !== null && { + onlineAbConfig: serializeAws_restJson1OnlineAbConfig(input.onlineAbConfig, context), + }), + ...(input.randomizationSalt !== undefined && + input.randomizationSalt !== null && { randomizationSalt: input.randomizationSalt }), + ...(input.samplingRate !== undefined && input.samplingRate !== null && { samplingRate: input.samplingRate }), + ...(input.treatments !== undefined && + input.treatments !== null && { + treatments: serializeAws_restJson1TreatmentConfigList(input.treatments, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateFeatureCommand = async ( + input: UpdateFeatureCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/features/{feature}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.feature !== undefined) { + const labelValue: string = input.feature; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: feature."); + } + resolvedPath = resolvedPath.replace("{feature}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: feature."); + } + let body: any; + body = JSON.stringify({ + ...(input.addOrUpdateVariations !== undefined && + input.addOrUpdateVariations !== null && { + addOrUpdateVariations: serializeAws_restJson1VariationConfigsList(input.addOrUpdateVariations, context), + }), + ...(input.defaultVariation !== undefined && + input.defaultVariation !== null && { defaultVariation: input.defaultVariation }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.entityOverrides !== undefined && + input.entityOverrides !== null && { + entityOverrides: serializeAws_restJson1EntityOverrideMap(input.entityOverrides, context), + }), + ...(input.evaluationStrategy !== undefined && + input.evaluationStrategy !== null && { evaluationStrategy: input.evaluationStrategy }), + ...(input.removeVariations !== undefined && + input.removeVariations !== null && { + removeVariations: serializeAws_restJson1VariationNameList(input.removeVariations, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateLaunchCommand = async ( + input: UpdateLaunchCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/launches/{launch}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + if (input.launch !== undefined) { + const labelValue: string = input.launch; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: launch."); + } + resolvedPath = resolvedPath.replace("{launch}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: launch."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.groups !== undefined && + input.groups !== null && { groups: serializeAws_restJson1LaunchGroupConfigList(input.groups, context) }), + ...(input.metricMonitors !== undefined && + input.metricMonitors !== null && { + metricMonitors: serializeAws_restJson1MetricMonitorConfigList(input.metricMonitors, context), + }), + ...(input.randomizationSalt !== undefined && + input.randomizationSalt !== null && { randomizationSalt: input.randomizationSalt }), + ...(input.scheduledSplitsConfig !== undefined && + input.scheduledSplitsConfig !== null && { + scheduledSplitsConfig: serializeAws_restJson1ScheduledSplitsLaunchConfig(input.scheduledSplitsConfig, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateProjectCommand = async ( + input: UpdateProjectCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateProjectDataDeliveryCommand = async ( + input: UpdateProjectDataDeliveryCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/projects/{project}/data-delivery"; + if (input.project !== undefined) { + const labelValue: string = input.project; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: project."); + } + resolvedPath = resolvedPath.replace("{project}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: project."); + } + let body: any; + body = JSON.stringify({ + ...(input.cloudWatchLogs !== undefined && + input.cloudWatchLogs !== null && { + cloudWatchLogs: serializeAws_restJson1CloudWatchLogsDestinationConfig(input.cloudWatchLogs, context), + }), + ...(input.s3Destination !== undefined && + input.s3Destination !== null && { + s3Destination: serializeAws_restJson1S3DestinationConfig(input.s3Destination, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const deserializeAws_restJson1BatchEvaluateFeatureCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1BatchEvaluateFeatureCommandError(output, context); + } + const contents: BatchEvaluateFeatureCommandOutput = { + $metadata: deserializeMetadata(output), + results: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.results !== undefined && data.results !== null) { + contents.results = deserializeAws_restJson1EvaluationResultsList(data.results, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1BatchEvaluateFeatureCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } 
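// Usage sketch (hand-written illustration, not generated code): UpdateProjectDataDelivery,
// which the serializer above PATCHes to /projects/{project}/data-delivery with either a
// cloudWatchLogs or s3Destination config. The S3DestinationConfig member names (bucket,
// prefix) and the bucket name are assumptions; check models_0.ts before relying on them.
import { EvidentlyClient, UpdateProjectDataDeliveryCommand } from "@aws-sdk/client-evidently";

async function sendEventsToS3(project: string) {
  const client = new EvidentlyClient({ region: "us-west-2" }); // placeholder region
  return client.send(
    new UpdateProjectDataDeliveryCommand({
      project,
      s3Destination: { bucket: "my-evidently-events", prefix: "evidently/" }, // assumed members
    })
  );
}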
as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateExperimentCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateExperimentCommandError(output, context); + } + const contents: CreateExperimentCommandOutput = { + $metadata: deserializeMetadata(output), + experiment: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.experiment !== undefined && data.experiment !== null) { + contents.experiment = deserializeAws_restJson1Experiment(data.experiment, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateExperimentCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.evidently#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateFeatureCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateFeatureCommandError(output, context); + } + const 
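// Usage sketch (hand-written illustration, not generated code): the error deserializers above
// map modeled exceptions (AccessDeniedException, ResourceNotFoundException, ...) onto a
// rejected Error whose `name` carries the error code and which includes $metadata, so a caller
// can branch on err.name as sketched here. The region value is a placeholder.
import { EvidentlyClient, GetProjectCommand } from "@aws-sdk/client-evidently";

async function getProjectOrNull(project: string) {
  const client = new EvidentlyClient({ region: "us-west-2" }); // placeholder region
  try {
    const { project: found } = await client.send(new GetProjectCommand({ project }));
    return found ?? null;
  } catch (err: any) {
    if (err?.name === "ResourceNotFoundException") {
      return null; // project does not exist
    }
    throw err; // rethrow throttling, validation, access-denied, etc.
  }
}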
contents: CreateFeatureCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    feature: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.feature !== undefined && data.feature !== null) {
+    contents.feature = deserializeAws_restJson1Feature(data.feature, context);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CreateFeatureCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateFeatureCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.evidently#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ConflictException":
+    case "com.amazonaws.evidently#ConflictException":
+      response = {
+        ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ResourceNotFoundException":
+    case "com.amazonaws.evidently#ResourceNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ServiceQuotaExceededException":
+    case "com.amazonaws.evidently#ServiceQuotaExceededException":
+      response = {
+        ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.evidently#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CreateLaunchCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateLaunchCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CreateLaunchCommandError(output, context);
+  }
+  const contents: CreateLaunchCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    launch: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.launch !== undefined && data.launch !== null) {
+    contents.launch = deserializeAws_restJson1Launch(data.launch, context);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CreateLaunchCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateLaunchCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.evidently#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ConflictException":
+    case "com.amazonaws.evidently#ConflictException":
+      response = {
+        ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ResourceNotFoundException":
+    case "com.amazonaws.evidently#ResourceNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ServiceQuotaExceededException":
+    case "com.amazonaws.evidently#ServiceQuotaExceededException":
+      response = {
+        ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.evidently#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CreateProjectCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateProjectCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CreateProjectCommandError(output, context);
+  }
+  const contents: CreateProjectCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    project: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.project !== undefined && data.project !== null) {
+    contents.project = deserializeAws_restJson1Project(data.project, context);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CreateProjectCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateProjectCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.evidently#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata:
deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.evidently#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteExperimentCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteExperimentCommandError(output, context); + } + const contents: DeleteExperimentCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteExperimentCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.evidently#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceUnavailableException": + case "com.amazonaws.evidently#ServiceUnavailableException": + response = { + ...(await 
deserializeAws_restJson1ServiceUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteFeatureCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteFeatureCommandError(output, context); + } + const contents: DeleteFeatureCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteFeatureCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteLaunchCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => 
{ + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteLaunchCommandError(output, context); + } + const contents: DeleteLaunchCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteLaunchCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteProjectCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteProjectCommandError(output, context); + } + const contents: DeleteProjectCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteProjectCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case 
"com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1EvaluateFeatureCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1EvaluateFeatureCommandError(output, context); + } + const contents: EvaluateFeatureCommandOutput = { + $metadata: deserializeMetadata(output), + details: undefined, + reason: undefined, + value: undefined, + variation: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.details !== undefined && data.details !== null) { + contents.details = new __LazyJsonString(data.details); + } + if (data.reason !== undefined && data.reason !== null) { + contents.reason = __expectString(data.reason); + } + if (data.value !== undefined && data.value !== null) { + contents.value = deserializeAws_restJson1VariableValue(__expectUnion(data.value), context); + } + if (data.variation !== undefined && data.variation !== null) { + contents.variation = __expectString(data.variation); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1EvaluateFeatureCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await 
deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetExperimentCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetExperimentCommandError(output, context); + } + const contents: GetExperimentCommandOutput = { + $metadata: deserializeMetadata(output), + experiment: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.experiment !== undefined && data.experiment !== null) { + contents.experiment = deserializeAws_restJson1Experiment(data.experiment, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetExperimentCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || 
errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetExperimentResultsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetExperimentResultsCommandError(output, context); + } + const contents: GetExperimentResultsCommandOutput = { + $metadata: deserializeMetadata(output), + reports: undefined, + resultsData: undefined, + timestamps: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.reports !== undefined && data.reports !== null) { + contents.reports = deserializeAws_restJson1ExperimentReportList(data.reports, context); + } + if (data.resultsData !== undefined && data.resultsData !== null) { + contents.resultsData = deserializeAws_restJson1ExperimentResultsDataList(data.resultsData, context); + } + if (data.timestamps !== undefined && data.timestamps !== null) { + contents.timestamps = deserializeAws_restJson1TimestampList(data.timestamps, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetExperimentResultsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: 
"client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetFeatureCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetFeatureCommandError(output, context); + } + const contents: GetFeatureCommandOutput = { + $metadata: deserializeMetadata(output), + feature: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.feature !== undefined && data.feature !== null) { + contents.feature = deserializeAws_restJson1Feature(data.feature, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetFeatureCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetLaunchCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetLaunchCommandError(output, context); + } + const contents: GetLaunchCommandOutput = { + $metadata: deserializeMetadata(output), + launch: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.launch !== undefined && data.launch !== null) { + 
contents.launch = deserializeAws_restJson1Launch(data.launch, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetLaunchCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetProjectCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetProjectCommandError(output, context); + } + const contents: GetProjectCommandOutput = { + $metadata: deserializeMetadata(output), + project: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.project !== undefined && data.project !== null) { + contents.project = deserializeAws_restJson1Project(data.project, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetProjectCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case 
"ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListExperimentsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListExperimentsCommandError(output, context); + } + const contents: ListExperimentsCommandOutput = { + $metadata: deserializeMetadata(output), + experiments: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.experiments !== undefined && data.experiments !== null) { + contents.experiments = deserializeAws_restJson1ExperimentList(data.experiments, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListExperimentsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListFeaturesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if 
(output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListFeaturesCommandError(output, context); + } + const contents: ListFeaturesCommandOutput = { + $metadata: deserializeMetadata(output), + features: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.features !== undefined && data.features !== null) { + contents.features = deserializeAws_restJson1FeatureSummariesList(data.features, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListFeaturesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListLaunchesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListLaunchesCommandError(output, context); + } + const contents: ListLaunchesCommandOutput = { + $metadata: deserializeMetadata(output), + launches: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.launches !== undefined && data.launches !== null) { + contents.launches = deserializeAws_restJson1LaunchesList(data.launches, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = 
__expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListLaunchesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListProjectsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListProjectsCommandError(output, context); + } + const contents: ListProjectsCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + projects: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.projects !== undefined && data.projects !== null) { + contents.projects = deserializeAws_restJson1ProjectSummariesList(data.projects, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListProjectsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await 
deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTagsForResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTagsForResourceCommandError(output, context); + } + const contents: ListTagsForResourceCommandOutput = { + $metadata: deserializeMetadata(output), + tags: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.tags !== undefined && data.tags !== null) { + contents.tags = deserializeAws_restJson1TagMap(data.tags, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTagsForResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1PutProjectEventsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode 
!== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1PutProjectEventsCommandError(output, context); + } + const contents: PutProjectEventsCommandOutput = { + $metadata: deserializeMetadata(output), + eventResults: undefined, + failedEventCount: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.eventResults !== undefined && data.eventResults !== null) { + contents.eventResults = deserializeAws_restJson1PutProjectEventsResultEntryList(data.eventResults, context); + } + if (data.failedEventCount !== undefined && data.failedEventCount !== null) { + contents.failedEventCount = __expectInt32(data.failedEventCount); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1PutProjectEventsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1StartExperimentCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1StartExperimentCommandError(output, context); + } + const contents: StartExperimentCommandOutput = { + $metadata: deserializeMetadata(output), + startedTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.startedTime !== undefined && data.startedTime !== null) { + contents.startedTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.startedTime))); + } + return Promise.resolve(contents); +}; + +const 
deserializeAws_restJson1StartExperimentCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.evidently#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1StartLaunchCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1StartLaunchCommandError(output, context); + } + const contents: StartLaunchCommandOutput = { + $metadata: deserializeMetadata(output), + launch: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.launch !== undefined && data.launch !== null) { + contents.launch = deserializeAws_restJson1Launch(data.launch, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1StartLaunchCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: 
string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.evidently#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1StopExperimentCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1StopExperimentCommandError(output, context); + } + const contents: StopExperimentCommandOutput = { + $metadata: deserializeMetadata(output), + endedTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.endedTime !== undefined && data.endedTime !== null) { + contents.endedTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.endedTime))); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1StopExperimentCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + 
...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.evidently#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1StopLaunchCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1StopLaunchCommandError(output, context); + } + const contents: StopLaunchCommandOutput = { + $metadata: deserializeMetadata(output), + endedTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.endedTime !== undefined && data.endedTime !== null) { + contents.endedTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.endedTime))); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1StopLaunchCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case 
"com.amazonaws.evidently#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1TagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1TagResourceCommandError(output, context); + } + const contents: TagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1TagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UntagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UntagResourceCommandError(output, context); + } + const contents: UntagResourceCommandOutput = { + $metadata: 
deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UntagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateExperimentCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateExperimentCommandError(output, context); + } + const contents: UpdateExperimentCommandOutput = { + $metadata: deserializeMetadata(output), + experiment: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.experiment !== undefined && data.experiment !== null) { + contents.experiment = deserializeAws_restJson1Experiment(data.experiment, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateExperimentCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + 
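// --- Editor's illustrative sketch (not part of the generated patch) ----------------
// The tagging error deserializers above reject with an Error whose `name` is the
// resolved error code and which carries $fault and $metadata, so application code can
// branch on `name`. The client construction and request member names below are
// assumptions for illustration only, not taken from this patch.
import { EvidentlyClient, UntagResourceCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

async function removeStageTag(resourceArn: string): Promise<void> {
  try {
    await client.send(new UntagResourceCommand({ resourceArn, tagKeys: ["stage"] }));
  } catch (err: any) {
    if (err.name === "ResourceNotFoundException") {
      // message and $metadata.httpStatusCode were filled in by the deserializer above
      console.warn(`nothing to untag (HTTP ${err.$metadata?.httpStatusCode}): ${err.message}`);
      return;
    }
    throw err;
  }
}
// ------------------------------------------------------------------------------------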
break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateFeatureCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateFeatureCommandError(output, context); + } + const contents: UpdateFeatureCommandOutput = { + $metadata: deserializeMetadata(output), + feature: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.feature !== undefined && data.feature !== null) { + contents.feature = deserializeAws_restJson1Feature(data.feature, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateFeatureCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.evidently#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; 
+ break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateLaunchCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateLaunchCommandError(output, context); + } + const contents: UpdateLaunchCommandOutput = { + $metadata: deserializeMetadata(output), + launch: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.launch !== undefined && data.launch !== null) { + contents.launch = deserializeAws_restJson1Launch(data.launch, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateLaunchCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateProjectCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateProjectCommandError(output, context); + } + const contents: 
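// --- Editor's illustrative sketch (not part of the generated patch) ----------------
// Standalone version of the `default:` fallback used in the error switches above:
// when the error code is not one of the modeled exceptions, the raw body is surfaced
// with a `name` built from code/Code, a normalized `message`, and a generic "client"
// fault classification. The body value here is made up.
const rawBody: any = { Code: "InternalFailure", Message: "something went wrong" };

let code = "UnknownError";
code = rawBody.code || rawBody.Code || code;

const normalized: any = {
  ...rawBody,
  name: `${code}`,
  message: rawBody.message || rawBody.Message || code,
  $fault: "client",
};
delete normalized.Message; // only the lower-case `message` survives
// normalized -> { Code: "InternalFailure", name: "InternalFailure",
//                 message: "something went wrong", $fault: "client" }
// ------------------------------------------------------------------------------------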
UpdateProjectCommandOutput = { + $metadata: deserializeMetadata(output), + project: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.project !== undefined && data.project !== null) { + contents.project = deserializeAws_restJson1Project(data.project, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateProjectCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.evidently#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateProjectDataDeliveryCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateProjectDataDeliveryCommandError(output, context); + } + const contents: UpdateProjectDataDeliveryCommandOutput = { + $metadata: deserializeMetadata(output), + project: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.project !== undefined && data.project !== null) { + contents.project = deserializeAws_restJson1Project(data.project, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateProjectDataDeliveryCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + 
errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.evidently#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.evidently#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.evidently#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.evidently#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.evidently#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: AccessDeniedException = { + name: "AccessDeniedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ConflictExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ConflictException = { + name: "ConflictException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + resourceId: undefined, + resourceType: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.resourceId !== undefined && data.resourceId !== null) { + contents.resourceId = __expectString(data.resourceId); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + return contents; +}; + +const deserializeAws_restJson1InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: InternalServerException = { + name: "InternalServerException", + $fault: "server", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; 
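// --- Editor's illustrative sketch (not part of the generated patch) ----------------
// The exception-response deserializers above (AccessDenied, Conflict, InternalServer,
// and the ones that follow) produce plain typed objects. A hand-written example of
// what a deserialized ConflictException looks like, with made-up field values:
const sampleConflict = {
  name: "ConflictException",
  $fault: "client" as const, // InternalServerException/ServiceUnavailableException use "server"
  $metadata: { httpStatusCode: 409 }, // filled in by deserializeMetadata(parsedOutput)
  message: "Experiment is already running",
  resourceId: "exp-1234abcd",
  resourceType: "Experiment",
};
// ------------------------------------------------------------------------------------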
+ const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ResourceNotFoundException = { + name: "ResourceNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + resourceId: undefined, + resourceType: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.resourceId !== undefined && data.resourceId !== null) { + contents.resourceId = __expectString(data.resourceId); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + return contents; +}; + +const deserializeAws_restJson1ServiceQuotaExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ServiceQuotaExceededException = { + name: "ServiceQuotaExceededException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + quotaCode: undefined, + resourceId: undefined, + resourceType: undefined, + serviceCode: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.quotaCode !== undefined && data.quotaCode !== null) { + contents.quotaCode = __expectString(data.quotaCode); + } + if (data.resourceId !== undefined && data.resourceId !== null) { + contents.resourceId = __expectString(data.resourceId); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + if (data.serviceCode !== undefined && data.serviceCode !== null) { + contents.serviceCode = __expectString(data.serviceCode); + } + return contents; +}; + +const deserializeAws_restJson1ServiceUnavailableExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ServiceUnavailableException = { + name: "ServiceUnavailableException", + $fault: "server", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ThrottlingExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ThrottlingException = { + name: "ThrottlingException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + quotaCode: undefined, + serviceCode: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.quotaCode !== undefined && data.quotaCode !== null) { + contents.quotaCode = __expectString(data.quotaCode); + } + if (data.serviceCode !== undefined && data.serviceCode !== null) { + contents.serviceCode = __expectString(data.serviceCode); + } + return contents; +}; + +const deserializeAws_restJson1ValidationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + 
const contents: ValidationException = { + name: "ValidationException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + fieldList: undefined, + message: undefined, + reason: undefined, + }; + const data: any = parsedOutput.body; + if (data.fieldList !== undefined && data.fieldList !== null) { + contents.fieldList = deserializeAws_restJson1ValidationExceptionFieldList(data.fieldList, context); + } + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.reason !== undefined && data.reason !== null) { + contents.reason = __expectString(data.reason); + } + return contents; +}; + +const serializeAws_restJson1CloudWatchLogsDestinationConfig = ( + input: CloudWatchLogsDestinationConfig, + context: __SerdeContext +): any => { + return { + ...(input.logGroup !== undefined && input.logGroup !== null && { logGroup: input.logGroup }), + }; +}; + +const serializeAws_restJson1EntityOverrideMap = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1EvaluationRequest = (input: EvaluationRequest, context: __SerdeContext): any => { + return { + ...(input.entityId !== undefined && input.entityId !== null && { entityId: input.entityId }), + ...(input.evaluationContext !== undefined && + input.evaluationContext !== null && { evaluationContext: __LazyJsonString.fromObject(input.evaluationContext) }), + ...(input.feature !== undefined && input.feature !== null && { feature: input.feature }), + }; +}; + +const serializeAws_restJson1EvaluationRequestsList = (input: EvaluationRequest[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1EvaluationRequest(entry, context); + }); +}; + +const serializeAws_restJson1Event = (input: Event, context: __SerdeContext): any => { + return { + ...(input.data !== undefined && input.data !== null && { data: __LazyJsonString.fromObject(input.data) }), + ...(input.timestamp !== undefined && + input.timestamp !== null && { timestamp: Math.round(input.timestamp.getTime() / 1000) }), + ...(input.type !== undefined && input.type !== null && { type: input.type }), + }; +}; + +const serializeAws_restJson1EventList = (input: Event[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1Event(entry, context); + }); +}; + +const serializeAws_restJson1ExperimentReportNameList = ( + input: (ExperimentReportName | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1ExperimentResultRequestTypeList = ( + input: (ExperimentResultRequestType | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1GroupToWeightMap = (input: { [key: string]: number }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: 
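// --- Editor's illustrative sketch (not part of the generated patch) ----------------
// Two serializer patterns appear above: optional struct members are guarded and
// spread so undefined/null values never reach the wire JSON, and JSON-document
// members (evaluationContext, eventPattern, data) are wrapped in LazyJsonString so
// they are embedded as JSON strings. The interface and values below are made up.
import { LazyJsonString } from "@aws-sdk/smithy-client";

interface EvaluationRequestLike {
  entityId?: string;
  evaluationContext?: any;
  feature?: string;
}

const toWire = (input: EvaluationRequestLike): any => ({
  ...(input.entityId !== undefined && input.entityId !== null && { entityId: input.entityId }),
  ...(input.evaluationContext !== undefined &&
    input.evaluationContext !== null && { evaluationContext: LazyJsonString.fromObject(input.evaluationContext) }),
  ...(input.feature !== undefined && input.feature !== null && { feature: input.feature }),
});

console.log(toWire({ entityId: "user-1", feature: "new-checkout" }));
// -> { entityId: "user-1", feature: "new-checkout" }   (evaluationContext is omitted)
// ------------------------------------------------------------------------------------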
[string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1LaunchGroupConfig = (input: LaunchGroupConfig, context: __SerdeContext): any => { + return { + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.feature !== undefined && input.feature !== null && { feature: input.feature }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.variation !== undefined && input.variation !== null && { variation: input.variation }), + }; +}; + +const serializeAws_restJson1LaunchGroupConfigList = (input: LaunchGroupConfig[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1LaunchGroupConfig(entry, context); + }); +}; + +const serializeAws_restJson1MetricDefinitionConfig = (input: MetricDefinitionConfig, context: __SerdeContext): any => { + return { + ...(input.entityIdKey !== undefined && input.entityIdKey !== null && { entityIdKey: input.entityIdKey }), + ...(input.eventPattern !== undefined && + input.eventPattern !== null && { eventPattern: __LazyJsonString.fromObject(input.eventPattern) }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.unitLabel !== undefined && input.unitLabel !== null && { unitLabel: input.unitLabel }), + ...(input.valueKey !== undefined && input.valueKey !== null && { valueKey: input.valueKey }), + }; +}; + +const serializeAws_restJson1MetricGoalConfig = (input: MetricGoalConfig, context: __SerdeContext): any => { + return { + ...(input.desiredChange !== undefined && input.desiredChange !== null && { desiredChange: input.desiredChange }), + ...(input.metricDefinition !== undefined && + input.metricDefinition !== null && { + metricDefinition: serializeAws_restJson1MetricDefinitionConfig(input.metricDefinition, context), + }), + }; +}; + +const serializeAws_restJson1MetricGoalConfigList = (input: MetricGoalConfig[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1MetricGoalConfig(entry, context); + }); +}; + +const serializeAws_restJson1MetricMonitorConfig = (input: MetricMonitorConfig, context: __SerdeContext): any => { + return { + ...(input.metricDefinition !== undefined && + input.metricDefinition !== null && { + metricDefinition: serializeAws_restJson1MetricDefinitionConfig(input.metricDefinition, context), + }), + }; +}; + +const serializeAws_restJson1MetricMonitorConfigList = (input: MetricMonitorConfig[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1MetricMonitorConfig(entry, context); + }); +}; + +const serializeAws_restJson1MetricNameList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1OnlineAbConfig = (input: OnlineAbConfig, context: __SerdeContext): any => { + return { + ...(input.controlTreatmentName !== undefined && + input.controlTreatmentName !== null && { controlTreatmentName: input.controlTreatmentName }), + ...(input.treatmentWeights !== 
undefined && + input.treatmentWeights !== null && { + treatmentWeights: serializeAws_restJson1TreatmentToWeightMap(input.treatmentWeights, context), + }), + }; +}; + +const serializeAws_restJson1ProjectDataDeliveryConfig = ( + input: ProjectDataDeliveryConfig, + context: __SerdeContext +): any => { + return { + ...(input.cloudWatchLogs !== undefined && + input.cloudWatchLogs !== null && { + cloudWatchLogs: serializeAws_restJson1CloudWatchLogsDestinationConfig(input.cloudWatchLogs, context), + }), + ...(input.s3Destination !== undefined && + input.s3Destination !== null && { + s3Destination: serializeAws_restJson1S3DestinationConfig(input.s3Destination, context), + }), + }; +}; + +const serializeAws_restJson1S3DestinationConfig = (input: S3DestinationConfig, context: __SerdeContext): any => { + return { + ...(input.bucket !== undefined && input.bucket !== null && { bucket: input.bucket }), + ...(input.prefix !== undefined && input.prefix !== null && { prefix: input.prefix }), + }; +}; + +const serializeAws_restJson1ScheduledSplitConfig = (input: ScheduledSplitConfig, context: __SerdeContext): any => { + return { + ...(input.groupWeights !== undefined && + input.groupWeights !== null && { + groupWeights: serializeAws_restJson1GroupToWeightMap(input.groupWeights, context), + }), + ...(input.startTime !== undefined && + input.startTime !== null && { startTime: Math.round(input.startTime.getTime() / 1000) }), + }; +}; + +const serializeAws_restJson1ScheduledSplitConfigList = ( + input: ScheduledSplitConfig[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1ScheduledSplitConfig(entry, context); + }); +}; + +const serializeAws_restJson1ScheduledSplitsLaunchConfig = ( + input: ScheduledSplitsLaunchConfig, + context: __SerdeContext +): any => { + return { + ...(input.steps !== undefined && + input.steps !== null && { steps: serializeAws_restJson1ScheduledSplitConfigList(input.steps, context) }), + }; +}; + +const serializeAws_restJson1TagMap = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1TreatmentConfig = (input: TreatmentConfig, context: __SerdeContext): any => { + return { + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.feature !== undefined && input.feature !== null && { feature: input.feature }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.variation !== undefined && input.variation !== null && { variation: input.variation }), + }; +}; + +const serializeAws_restJson1TreatmentConfigList = (input: TreatmentConfig[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1TreatmentConfig(entry, context); + }); +}; + +const serializeAws_restJson1TreatmentNameList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1TreatmentToWeightMap = (input: { [key: string]: number }, context: 
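// --- Editor's illustrative sketch (not part of the generated patch) ----------------
// serializeAws_restJson1ScheduledSplitConfig above sends Date members as whole epoch
// seconds, the inverse of the parseEpochTimestamp conversion on the response side.
// A standalone version with an illustrative date:
const startTime = new Date("2021-12-01T23:06:40.000Z");
const wireStartTime = Math.round(startTime.getTime() / 1000);
console.log(wireStartTime); // 1638400000
// ------------------------------------------------------------------------------------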
__SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1VariableValue = (input: VariableValue, context: __SerdeContext): any => { + return VariableValue.visit(input, { + boolValue: (value) => ({ boolValue: value }), + doubleValue: (value) => ({ doubleValue: __serializeFloat(value) }), + longValue: (value) => ({ longValue: value }), + stringValue: (value) => ({ stringValue: value }), + _: (name, value) => ({ name: value } as any), + }); +}; + +const serializeAws_restJson1VariationConfig = (input: VariationConfig, context: __SerdeContext): any => { + return { + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.value !== undefined && + input.value !== null && { value: serializeAws_restJson1VariableValue(input.value, context) }), + }; +}; + +const serializeAws_restJson1VariationConfigsList = (input: VariationConfig[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1VariationConfig(entry, context); + }); +}; + +const serializeAws_restJson1VariationNameList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const deserializeAws_restJson1CloudWatchLogsDestination = ( + output: any, + context: __SerdeContext +): CloudWatchLogsDestination => { + return { + logGroup: __expectString(output.logGroup), + } as any; +}; + +const deserializeAws_restJson1DoubleValueList = (output: any, context: __SerdeContext): number[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __limitedParseDouble(entry) as any; + }); +}; + +const deserializeAws_restJson1EntityOverrideMap = (output: any, context: __SerdeContext): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1EvaluationResult = (output: any, context: __SerdeContext): EvaluationResult => { + return { + details: output.details !== undefined && output.details !== null ? new __LazyJsonString(output.details) : undefined, + entityId: __expectString(output.entityId), + feature: __expectString(output.feature), + project: __expectString(output.project), + reason: __expectString(output.reason), + value: + output.value !== undefined && output.value !== null + ? 
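// --- Editor's illustrative sketch (not part of the generated patch) ----------------
// serializeAws_restJson1VariableValue above uses the visitor generated for the
// VariableValue tagged union, emitting exactly one of boolValue / doubleValue /
// longValue / stringValue. A hedged usage sketch, assuming VariableValue is imported
// from this client's generated models:
import { VariableValue } from "@aws-sdk/client-evidently";

const defaultVariation: VariableValue = { stringValue: "blue-button" };

const asWireJson = VariableValue.visit<any>(defaultVariation, {
  boolValue: (value) => ({ boolValue: value }),
  doubleValue: (value) => ({ doubleValue: value }),
  longValue: (value) => ({ longValue: value }),
  stringValue: (value) => ({ stringValue: value }),
  _: (name, value) => ({ [name]: value }),
});
// asWireJson -> { stringValue: "blue-button" }
// ------------------------------------------------------------------------------------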
deserializeAws_restJson1VariableValue(__expectUnion(output.value), context) + : undefined, + variation: __expectString(output.variation), + } as any; +}; + +const deserializeAws_restJson1EvaluationResultsList = (output: any, context: __SerdeContext): EvaluationResult[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1EvaluationResult(entry, context); + }); +}; + +const deserializeAws_restJson1EvaluationRule = (output: any, context: __SerdeContext): EvaluationRule => { + return { + name: __expectString(output.name), + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_restJson1EvaluationRulesList = (output: any, context: __SerdeContext): EvaluationRule[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1EvaluationRule(entry, context); + }); +}; + +const deserializeAws_restJson1Experiment = (output: any, context: __SerdeContext): Experiment => { + return { + arn: __expectString(output.arn), + createdTime: + output.createdTime !== undefined && output.createdTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdTime))) + : undefined, + description: __expectString(output.description), + execution: + output.execution !== undefined && output.execution !== null + ? deserializeAws_restJson1ExperimentExecution(output.execution, context) + : undefined, + lastUpdatedTime: + output.lastUpdatedTime !== undefined && output.lastUpdatedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdatedTime))) + : undefined, + metricGoals: + output.metricGoals !== undefined && output.metricGoals !== null + ? deserializeAws_restJson1MetricGoalsList(output.metricGoals, context) + : undefined, + name: __expectString(output.name), + onlineAbDefinition: + output.onlineAbDefinition !== undefined && output.onlineAbDefinition !== null + ? deserializeAws_restJson1OnlineAbDefinition(output.onlineAbDefinition, context) + : undefined, + project: __expectString(output.project), + randomizationSalt: __expectString(output.randomizationSalt), + samplingRate: __expectLong(output.samplingRate), + schedule: + output.schedule !== undefined && output.schedule !== null + ? deserializeAws_restJson1ExperimentSchedule(output.schedule, context) + : undefined, + status: __expectString(output.status), + statusReason: __expectString(output.statusReason), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + treatments: + output.treatments !== undefined && output.treatments !== null + ? deserializeAws_restJson1TreatmentList(output.treatments, context) + : undefined, + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_restJson1ExperimentExecution = (output: any, context: __SerdeContext): ExperimentExecution => { + return { + endedTime: + output.endedTime !== undefined && output.endedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.endedTime))) + : undefined, + startedTime: + output.startedTime !== undefined && output.startedTime !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.startedTime))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ExperimentList = (output: any, context: __SerdeContext): Experiment[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Experiment(entry, context); + }); +}; + +const deserializeAws_restJson1ExperimentReport = (output: any, context: __SerdeContext): ExperimentReport => { + return { + content: output.content !== undefined && output.content !== null ? new __LazyJsonString(output.content) : undefined, + metricName: __expectString(output.metricName), + reportName: __expectString(output.reportName), + treatmentName: __expectString(output.treatmentName), + } as any; +}; + +const deserializeAws_restJson1ExperimentReportList = (output: any, context: __SerdeContext): ExperimentReport[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ExperimentReport(entry, context); + }); +}; + +const deserializeAws_restJson1ExperimentResultsData = (output: any, context: __SerdeContext): ExperimentResultsData => { + return { + metricName: __expectString(output.metricName), + resultStat: __expectString(output.resultStat), + treatmentName: __expectString(output.treatmentName), + values: + output.values !== undefined && output.values !== null + ? deserializeAws_restJson1DoubleValueList(output.values, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ExperimentResultsDataList = ( + output: any, + context: __SerdeContext +): ExperimentResultsData[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ExperimentResultsData(entry, context); + }); +}; + +const deserializeAws_restJson1ExperimentSchedule = (output: any, context: __SerdeContext): ExperimentSchedule => { + return { + analysisCompleteTime: + output.analysisCompleteTime !== undefined && output.analysisCompleteTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.analysisCompleteTime))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1Feature = (output: any, context: __SerdeContext): Feature => { + return { + arn: __expectString(output.arn), + createdTime: + output.createdTime !== undefined && output.createdTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdTime))) + : undefined, + defaultVariation: __expectString(output.defaultVariation), + description: __expectString(output.description), + entityOverrides: + output.entityOverrides !== undefined && output.entityOverrides !== null + ? deserializeAws_restJson1EntityOverrideMap(output.entityOverrides, context) + : undefined, + evaluationRules: + output.evaluationRules !== undefined && output.evaluationRules !== null + ? deserializeAws_restJson1EvaluationRulesList(output.evaluationRules, context) + : undefined, + evaluationStrategy: __expectString(output.evaluationStrategy), + lastUpdatedTime: + output.lastUpdatedTime !== undefined && output.lastUpdatedTime !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdatedTime))) + : undefined, + name: __expectString(output.name), + project: __expectString(output.project), + status: __expectString(output.status), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + valueType: __expectString(output.valueType), + variations: + output.variations !== undefined && output.variations !== null + ? deserializeAws_restJson1VariationsList(output.variations, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1FeatureSummariesList = (output: any, context: __SerdeContext): FeatureSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1FeatureSummary(entry, context); + }); +}; + +const deserializeAws_restJson1FeatureSummary = (output: any, context: __SerdeContext): FeatureSummary => { + return { + arn: __expectString(output.arn), + createdTime: + output.createdTime !== undefined && output.createdTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdTime))) + : undefined, + defaultVariation: __expectString(output.defaultVariation), + evaluationRules: + output.evaluationRules !== undefined && output.evaluationRules !== null + ? deserializeAws_restJson1EvaluationRulesList(output.evaluationRules, context) + : undefined, + evaluationStrategy: __expectString(output.evaluationStrategy), + lastUpdatedTime: + output.lastUpdatedTime !== undefined && output.lastUpdatedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdatedTime))) + : undefined, + name: __expectString(output.name), + project: __expectString(output.project), + status: __expectString(output.status), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1FeatureToVariationMap = ( + output: any, + context: __SerdeContext +): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1GroupToWeightMap = (output: any, context: __SerdeContext): { [key: string]: number } => { + return Object.entries(output).reduce((acc: { [key: string]: number }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectLong(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1Launch = (output: any, context: __SerdeContext): Launch => { + return { + arn: __expectString(output.arn), + createdTime: + output.createdTime !== undefined && output.createdTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdTime))) + : undefined, + description: __expectString(output.description), + execution: + output.execution !== undefined && output.execution !== null + ? deserializeAws_restJson1LaunchExecution(output.execution, context) + : undefined, + groups: + output.groups !== undefined && output.groups !== null + ? deserializeAws_restJson1LaunchGroupList(output.groups, context) + : undefined, + lastUpdatedTime: + output.lastUpdatedTime !== undefined && output.lastUpdatedTime !== null + ? 
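// --- Editor's illustrative sketch (not part of the generated patch) ----------------
// The map deserializers above (FeatureToVariationMap, GroupToWeightMap) reduce
// Object.entries of the raw JSON into a plain record, skipping null values and
// validating each value with an expect* helper. A standalone version for a weight
// map, with made-up input:
import { expectLong } from "@aws-sdk/smithy-client";

const rawWeights: any = { control: 50000, treatment: 50000, retired: null };

const groupWeights = Object.entries(rawWeights).reduce(
  (acc: { [key: string]: number }, [key, value]: [string, any]) => {
    if (value === null) {
      return acc; // null entries are dropped, matching the generated code
    }
    return { ...acc, [key]: expectLong(value) as any };
  },
  {}
);
// groupWeights -> { control: 50000, treatment: 50000 }
// ------------------------------------------------------------------------------------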
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdatedTime))) + : undefined, + metricMonitors: + output.metricMonitors !== undefined && output.metricMonitors !== null + ? deserializeAws_restJson1MetricMonitorList(output.metricMonitors, context) + : undefined, + name: __expectString(output.name), + project: __expectString(output.project), + randomizationSalt: __expectString(output.randomizationSalt), + scheduledSplitsDefinition: + output.scheduledSplitsDefinition !== undefined && output.scheduledSplitsDefinition !== null + ? deserializeAws_restJson1ScheduledSplitsLaunchDefinition(output.scheduledSplitsDefinition, context) + : undefined, + status: __expectString(output.status), + statusReason: __expectString(output.statusReason), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_restJson1LaunchesList = (output: any, context: __SerdeContext): Launch[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Launch(entry, context); + }); +}; + +const deserializeAws_restJson1LaunchExecution = (output: any, context: __SerdeContext): LaunchExecution => { + return { + endedTime: + output.endedTime !== undefined && output.endedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.endedTime))) + : undefined, + startedTime: + output.startedTime !== undefined && output.startedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.startedTime))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1LaunchGroup = (output: any, context: __SerdeContext): LaunchGroup => { + return { + description: __expectString(output.description), + featureVariations: + output.featureVariations !== undefined && output.featureVariations !== null + ? deserializeAws_restJson1FeatureToVariationMap(output.featureVariations, context) + : undefined, + name: __expectString(output.name), + } as any; +}; + +const deserializeAws_restJson1LaunchGroupList = (output: any, context: __SerdeContext): LaunchGroup[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1LaunchGroup(entry, context); + }); +}; + +const deserializeAws_restJson1MetricDefinition = (output: any, context: __SerdeContext): MetricDefinition => { + return { + entityIdKey: __expectString(output.entityIdKey), + eventPattern: + output.eventPattern !== undefined && output.eventPattern !== null + ? new __LazyJsonString(output.eventPattern) + : undefined, + name: __expectString(output.name), + unitLabel: __expectString(output.unitLabel), + valueKey: __expectString(output.valueKey), + } as any; +}; + +const deserializeAws_restJson1MetricGoal = (output: any, context: __SerdeContext): MetricGoal => { + return { + desiredChange: __expectString(output.desiredChange), + metricDefinition: + output.metricDefinition !== undefined && output.metricDefinition !== null + ? 
deserializeAws_restJson1MetricDefinition(output.metricDefinition, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1MetricGoalsList = (output: any, context: __SerdeContext): MetricGoal[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1MetricGoal(entry, context); + }); +}; + +const deserializeAws_restJson1MetricMonitor = (output: any, context: __SerdeContext): MetricMonitor => { + return { + metricDefinition: + output.metricDefinition !== undefined && output.metricDefinition !== null + ? deserializeAws_restJson1MetricDefinition(output.metricDefinition, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1MetricMonitorList = (output: any, context: __SerdeContext): MetricMonitor[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1MetricMonitor(entry, context); + }); +}; + +const deserializeAws_restJson1OnlineAbDefinition = (output: any, context: __SerdeContext): OnlineAbDefinition => { + return { + controlTreatmentName: __expectString(output.controlTreatmentName), + treatmentWeights: + output.treatmentWeights !== undefined && output.treatmentWeights !== null + ? deserializeAws_restJson1TreatmentToWeightMap(output.treatmentWeights, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1Project = (output: any, context: __SerdeContext): Project => { + return { + activeExperimentCount: __expectLong(output.activeExperimentCount), + activeLaunchCount: __expectLong(output.activeLaunchCount), + arn: __expectString(output.arn), + createdTime: + output.createdTime !== undefined && output.createdTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdTime))) + : undefined, + dataDelivery: + output.dataDelivery !== undefined && output.dataDelivery !== null + ? deserializeAws_restJson1ProjectDataDelivery(output.dataDelivery, context) + : undefined, + description: __expectString(output.description), + experimentCount: __expectLong(output.experimentCount), + featureCount: __expectLong(output.featureCount), + lastUpdatedTime: + output.lastUpdatedTime !== undefined && output.lastUpdatedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdatedTime))) + : undefined, + launchCount: __expectLong(output.launchCount), + name: __expectString(output.name), + status: __expectString(output.status), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ProjectDataDelivery = (output: any, context: __SerdeContext): ProjectDataDelivery => { + return { + cloudWatchLogs: + output.cloudWatchLogs !== undefined && output.cloudWatchLogs !== null + ? deserializeAws_restJson1CloudWatchLogsDestination(output.cloudWatchLogs, context) + : undefined, + s3Destination: + output.s3Destination !== undefined && output.s3Destination !== null + ? 
deserializeAws_restJson1S3Destination(output.s3Destination, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ProjectSummariesList = (output: any, context: __SerdeContext): ProjectSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ProjectSummary(entry, context); + }); +}; + +const deserializeAws_restJson1ProjectSummary = (output: any, context: __SerdeContext): ProjectSummary => { + return { + activeExperimentCount: __expectLong(output.activeExperimentCount), + activeLaunchCount: __expectLong(output.activeLaunchCount), + arn: __expectString(output.arn), + createdTime: + output.createdTime !== undefined && output.createdTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdTime))) + : undefined, + description: __expectString(output.description), + experimentCount: __expectLong(output.experimentCount), + featureCount: __expectLong(output.featureCount), + lastUpdatedTime: + output.lastUpdatedTime !== undefined && output.lastUpdatedTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastUpdatedTime))) + : undefined, + launchCount: __expectLong(output.launchCount), + name: __expectString(output.name), + status: __expectString(output.status), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PutProjectEventsResultEntry = ( + output: any, + context: __SerdeContext +): PutProjectEventsResultEntry => { + return { + errorCode: __expectString(output.errorCode), + errorMessage: __expectString(output.errorMessage), + eventId: __expectString(output.eventId), + } as any; +}; + +const deserializeAws_restJson1PutProjectEventsResultEntryList = ( + output: any, + context: __SerdeContext +): PutProjectEventsResultEntry[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PutProjectEventsResultEntry(entry, context); + }); +}; + +const deserializeAws_restJson1S3Destination = (output: any, context: __SerdeContext): S3Destination => { + return { + bucket: __expectString(output.bucket), + prefix: __expectString(output.prefix), + } as any; +}; + +const deserializeAws_restJson1ScheduledSplit = (output: any, context: __SerdeContext): ScheduledSplit => { + return { + groupWeights: + output.groupWeights !== undefined && output.groupWeights !== null + ? deserializeAws_restJson1GroupToWeightMap(output.groupWeights, context) + : undefined, + startTime: + output.startTime !== undefined && output.startTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.startTime))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ScheduledSplitsLaunchDefinition = ( + output: any, + context: __SerdeContext +): ScheduledSplitsLaunchDefinition => { + return { + steps: + output.steps !== undefined && output.steps !== null + ? 
deserializeAws_restJson1ScheduledStepList(output.steps, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ScheduledStepList = (output: any, context: __SerdeContext): ScheduledSplit[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ScheduledSplit(entry, context); + }); +}; + +const deserializeAws_restJson1TagMap = (output: any, context: __SerdeContext): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1TimestampList = (output: any, context: __SerdeContext): Date[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectNonNull(__parseEpochTimestamp(__expectNumber(entry))); + }); +}; + +const deserializeAws_restJson1Treatment = (output: any, context: __SerdeContext): Treatment => { + return { + description: __expectString(output.description), + featureVariations: + output.featureVariations !== undefined && output.featureVariations !== null + ? deserializeAws_restJson1FeatureToVariationMap(output.featureVariations, context) + : undefined, + name: __expectString(output.name), + } as any; +}; + +const deserializeAws_restJson1TreatmentList = (output: any, context: __SerdeContext): Treatment[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Treatment(entry, context); + }); +}; + +const deserializeAws_restJson1TreatmentToWeightMap = ( + output: any, + context: __SerdeContext +): { [key: string]: number } => { + return Object.entries(output).reduce((acc: { [key: string]: number }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectLong(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1ValidationExceptionField = ( + output: any, + context: __SerdeContext +): ValidationExceptionField => { + return { + message: __expectString(output.message), + name: __expectString(output.name), + } as any; +}; + +const deserializeAws_restJson1ValidationExceptionFieldList = ( + output: any, + context: __SerdeContext +): ValidationExceptionField[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ValidationExceptionField(entry, context); + }); +}; + +const deserializeAws_restJson1VariableValue = (output: any, context: __SerdeContext): VariableValue => { + if (__expectBoolean(output.boolValue) !== undefined) { + return { boolValue: __expectBoolean(output.boolValue) as any }; + } + if (__limitedParseDouble(output.doubleValue) !== undefined) { + return { doubleValue: __limitedParseDouble(output.doubleValue) as any }; + } + if (__expectLong(output.longValue) !== undefined) { + return { longValue: __expectLong(output.longValue) as any }; + } + if (__expectString(output.stringValue) !== undefined) { + return { stringValue: __expectString(output.stringValue) as any }; + } + return { $unknown: Object.entries(output)[0] }; +}; + +const deserializeAws_restJson1Variation = (output: any, context: __SerdeContext): Variation => { + return { + 
name: __expectString(output.name), + value: + output.value !== undefined && output.value !== null + ? deserializeAws_restJson1VariableValue(__expectUnion(output.value), context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1VariationsList = (output: any, context: __SerdeContext): Variation[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Variation(entry, context); + }); +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. +const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. +const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const isSerializableHeaderValue = (value: any): boolean => + value !== undefined && + value !== null && + value !== "" && + (!Object.getOwnPropertyNames(value).includes("length") || value.length != 0) && + (!Object.getOwnPropertyNames(value).includes("size") || value.size != 0); + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. 
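 * Resolution order: the (case-insensitive) x-amzn-errortype response header is checked first,
 * then the body's "code" field, then its "__type" field; any ":"-delimited metadata suffix and
 * any "#"-delimited namespace prefix are stripped from the raw value before it is returned.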
+ */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-evidently/src/runtimeConfig.browser.ts b/clients/client-evidently/src/runtimeConfig.browser.ts new file mode 100644 index 000000000000..957241c2ba2f --- /dev/null +++ b/clients/client-evidently/src/runtimeConfig.browser.ts @@ -0,0 +1,44 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { Sha256 } from "@aws-crypto/sha256-browser"; +import { DEFAULT_USE_DUALSTACK_ENDPOINT, DEFAULT_USE_FIPS_ENDPOINT } from "@aws-sdk/config-resolver"; +import { FetchHttpHandler, streamCollector } from "@aws-sdk/fetch-http-handler"; +import { invalidProvider } from "@aws-sdk/invalid-dependency"; +import { DEFAULT_MAX_ATTEMPTS, DEFAULT_RETRY_MODE } from "@aws-sdk/middleware-retry"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-browser"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-browser"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-browser"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-browser"; +import { EvidentlyClientConfig } from "./EvidentlyClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: EvidentlyClientConfig) => { + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "browser", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? ((_: unknown) => () => Promise.reject(new Error("Credential is missing"))), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? DEFAULT_MAX_ATTEMPTS, + region: config?.region ?? invalidProvider("Region is missing"), + requestHandler: config?.requestHandler ?? new FetchHttpHandler(), + retryMode: config?.retryMode ?? (() => Promise.resolve(DEFAULT_RETRY_MODE)), + sha256: config?.sha256 ?? Sha256, + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? (() => Promise.resolve(DEFAULT_USE_DUALSTACK_ENDPOINT)), + useFipsEndpoint: config?.useFipsEndpoint ?? (() => Promise.resolve(DEFAULT_USE_FIPS_ENDPOINT)), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
toUtf8, + }; +}; diff --git a/clients/client-evidently/src/runtimeConfig.native.ts b/clients/client-evidently/src/runtimeConfig.native.ts new file mode 100644 index 000000000000..784010334c18 --- /dev/null +++ b/clients/client-evidently/src/runtimeConfig.native.ts @@ -0,0 +1,17 @@ +import { Sha256 } from "@aws-crypto/sha256-js"; + +import { EvidentlyClientConfig } from "./EvidentlyClient"; +import { getRuntimeConfig as getBrowserRuntimeConfig } from "./runtimeConfig.browser"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: EvidentlyClientConfig) => { + const browserDefaults = getBrowserRuntimeConfig(config); + return { + ...browserDefaults, + ...config, + runtime: "react-native", + sha256: config?.sha256 ?? Sha256, + }; +}; diff --git a/clients/client-evidently/src/runtimeConfig.shared.ts b/clients/client-evidently/src/runtimeConfig.shared.ts new file mode 100644 index 000000000000..9bacceaf959f --- /dev/null +++ b/clients/client-evidently/src/runtimeConfig.shared.ts @@ -0,0 +1,17 @@ +import { Logger as __Logger } from "@aws-sdk/types"; +import { parseUrl } from "@aws-sdk/url-parser"; + +import { defaultRegionInfoProvider } from "./endpoints"; +import { EvidentlyClientConfig } from "./EvidentlyClient"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: EvidentlyClientConfig) => ({ + apiVersion: "2021-02-01", + disableHostPrefix: config?.disableHostPrefix ?? false, + logger: config?.logger ?? ({} as __Logger), + regionInfoProvider: config?.regionInfoProvider ?? defaultRegionInfoProvider, + serviceId: config?.serviceId ?? "Evidently", + urlParser: config?.urlParser ?? parseUrl, +}); diff --git a/clients/client-evidently/src/runtimeConfig.ts b/clients/client-evidently/src/runtimeConfig.ts new file mode 100644 index 000000000000..950ff6c9c3b2 --- /dev/null +++ b/clients/client-evidently/src/runtimeConfig.ts @@ -0,0 +1,53 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { decorateDefaultCredentialProvider } from "@aws-sdk/client-sts"; +import { + NODE_REGION_CONFIG_FILE_OPTIONS, + NODE_REGION_CONFIG_OPTIONS, + NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS, + NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS, +} from "@aws-sdk/config-resolver"; +import { defaultProvider as credentialDefaultProvider } from "@aws-sdk/credential-provider-node"; +import { Hash } from "@aws-sdk/hash-node"; +import { NODE_MAX_ATTEMPT_CONFIG_OPTIONS, NODE_RETRY_MODE_CONFIG_OPTIONS } from "@aws-sdk/middleware-retry"; +import { loadConfig as loadNodeConfig } from "@aws-sdk/node-config-provider"; +import { NodeHttpHandler, streamCollector } from "@aws-sdk/node-http-handler"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-node"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-node"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-node"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-node"; +import { EvidentlyClientConfig } from "./EvidentlyClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { emitWarningIfUnsupportedVersion } from "@aws-sdk/smithy-client"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: EvidentlyClientConfig) => { + emitWarningIfUnsupportedVersion(process.version); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "node", + base64Decoder: config?.base64Decoder ?? 
fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? decorateDefaultCredentialProvider(credentialDefaultProvider), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? loadNodeConfig(NODE_MAX_ATTEMPT_CONFIG_OPTIONS), + region: config?.region ?? loadNodeConfig(NODE_REGION_CONFIG_OPTIONS, NODE_REGION_CONFIG_FILE_OPTIONS), + requestHandler: config?.requestHandler ?? new NodeHttpHandler(), + retryMode: config?.retryMode ?? loadNodeConfig(NODE_RETRY_MODE_CONFIG_OPTIONS), + sha256: config?.sha256 ?? Hash.bind(null, "sha256"), + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? loadNodeConfig(NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS), + useFipsEndpoint: config?.useFipsEndpoint ?? loadNodeConfig(NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? toUtf8, + }; +}; diff --git a/clients/client-evidently/tsconfig.es.json b/clients/client-evidently/tsconfig.es.json new file mode 100644 index 000000000000..4c72364cd1a0 --- /dev/null +++ b/clients/client-evidently/tsconfig.es.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "es5", + "module": "esnext", + "moduleResolution": "node", + "lib": ["dom", "es5", "es2015.promise", "es2015.collection", "es2015.iterable", "es2015.symbol.wellknown"], + "outDir": "dist-es" + } +} diff --git a/clients/client-evidently/tsconfig.json b/clients/client-evidently/tsconfig.json new file mode 100644 index 000000000000..093039289c53 --- /dev/null +++ b/clients/client-evidently/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "rootDir": "./src", + "alwaysStrict": true, + "target": "ES2018", + "module": "commonjs", + "strict": true, + "downlevelIteration": true, + "importHelpers": true, + "noEmitHelpers": true, + "incremental": true, + "resolveJsonModule": true, + "esModuleInterop": true, + "outDir": "dist-cjs", + "removeComments": true + }, + "typedocOptions": { + "exclude": ["**/node_modules/**", "**/*.spec.ts", "**/protocols/*.ts", "**/e2e/*.ts", "**/endpoints.ts"], + "excludeNotExported": true, + "excludePrivate": true, + "hideGenerator": true, + "ignoreCompilerErrors": true, + "includeDeclarations": true, + "stripInternal": true, + "readme": "README.md", + "mode": "file", + "out": "docs", + "theme": "minimal", + "plugin": ["@aws-sdk/service-client-documentation-generator"] + }, + "exclude": ["test/**/*"] +} diff --git a/clients/client-evidently/tsconfig.types.json b/clients/client-evidently/tsconfig.types.json new file mode 100644 index 000000000000..4c3dfa7b3d25 --- /dev/null +++ b/clients/client-evidently/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "removeComments": false, + "declaration": true, + "declarationDir": "dist-types", + "emitDeclarationOnly": true + }, + "exclude": ["test/**/*", "dist-types/**/*"] +} diff --git a/clients/client-fsx/src/FSx.ts b/clients/client-fsx/src/FSx.ts index 55110379c595..48e0f0a7ef76 100644 --- a/clients/client-fsx/src/FSx.ts +++ b/clients/client-fsx/src/FSx.ts @@ -16,6 +16,11 @@ import { CreateBackupCommandInput, CreateBackupCommandOutput, } from "./commands/CreateBackupCommand"; +import { + 
CreateDataRepositoryAssociationCommand, + CreateDataRepositoryAssociationCommandInput, + CreateDataRepositoryAssociationCommandOutput, +} from "./commands/CreateDataRepositoryAssociationCommand"; import { CreateDataRepositoryTaskCommand, CreateDataRepositoryTaskCommandInput, @@ -31,6 +36,11 @@ import { CreateFileSystemFromBackupCommandInput, CreateFileSystemFromBackupCommandOutput, } from "./commands/CreateFileSystemFromBackupCommand"; +import { + CreateSnapshotCommand, + CreateSnapshotCommandInput, + CreateSnapshotCommandOutput, +} from "./commands/CreateSnapshotCommand"; import { CreateStorageVirtualMachineCommand, CreateStorageVirtualMachineCommandInput, @@ -51,11 +61,21 @@ import { DeleteBackupCommandInput, DeleteBackupCommandOutput, } from "./commands/DeleteBackupCommand"; +import { + DeleteDataRepositoryAssociationCommand, + DeleteDataRepositoryAssociationCommandInput, + DeleteDataRepositoryAssociationCommandOutput, +} from "./commands/DeleteDataRepositoryAssociationCommand"; import { DeleteFileSystemCommand, DeleteFileSystemCommandInput, DeleteFileSystemCommandOutput, } from "./commands/DeleteFileSystemCommand"; +import { + DeleteSnapshotCommand, + DeleteSnapshotCommandInput, + DeleteSnapshotCommandOutput, +} from "./commands/DeleteSnapshotCommand"; import { DeleteStorageVirtualMachineCommand, DeleteStorageVirtualMachineCommandInput, @@ -71,6 +91,11 @@ import { DescribeBackupsCommandInput, DescribeBackupsCommandOutput, } from "./commands/DescribeBackupsCommand"; +import { + DescribeDataRepositoryAssociationsCommand, + DescribeDataRepositoryAssociationsCommandInput, + DescribeDataRepositoryAssociationsCommandOutput, +} from "./commands/DescribeDataRepositoryAssociationsCommand"; import { DescribeDataRepositoryTasksCommand, DescribeDataRepositoryTasksCommandInput, @@ -86,6 +111,11 @@ import { DescribeFileSystemsCommandInput, DescribeFileSystemsCommandOutput, } from "./commands/DescribeFileSystemsCommand"; +import { + DescribeSnapshotsCommand, + DescribeSnapshotsCommandInput, + DescribeSnapshotsCommandOutput, +} from "./commands/DescribeSnapshotsCommand"; import { DescribeStorageVirtualMachinesCommand, DescribeStorageVirtualMachinesCommandInput, @@ -106,17 +136,37 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; +import { + ReleaseFileSystemNfsV3LocksCommand, + ReleaseFileSystemNfsV3LocksCommandInput, + ReleaseFileSystemNfsV3LocksCommandOutput, +} from "./commands/ReleaseFileSystemNfsV3LocksCommand"; +import { + RestoreVolumeFromSnapshotCommand, + RestoreVolumeFromSnapshotCommandInput, + RestoreVolumeFromSnapshotCommandOutput, +} from "./commands/RestoreVolumeFromSnapshotCommand"; import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommand, UntagResourceCommandInput, UntagResourceCommandOutput, } from "./commands/UntagResourceCommand"; +import { + UpdateDataRepositoryAssociationCommand, + UpdateDataRepositoryAssociationCommandInput, + UpdateDataRepositoryAssociationCommandOutput, +} from "./commands/UpdateDataRepositoryAssociationCommand"; import { UpdateFileSystemCommand, UpdateFileSystemCommandInput, UpdateFileSystemCommandOutput, } from "./commands/UpdateFileSystemCommand"; +import { + UpdateSnapshotCommand, + UpdateSnapshotCommandInput, + UpdateSnapshotCommandOutput, +} from "./commands/UpdateSnapshotCommand"; import { UpdateStorageVirtualMachineCommand, UpdateStorageVirtualMachineCommandInput, @@ -224,24 +274,24 @@ 
export class FSx extends FSxClient { *

                                  Copies an existing backup within the same Amazon Web Services account to another Amazon Web Services Region * (cross-Region copy) or within the same Amazon Web Services Region (in-Region copy). You can have up to five * backup copy requests in progress to a single destination Region per account.

                                  - *

                                  You can use cross-Region backup copies for cross-region disaster recovery. - * You periodically take backups and copy them to another Region so that in the - * event of a disaster in the primary Region, you can restore from backup and recover - * availability quickly in the other Region. You can make cross-Region copies - * only within your Amazon Web Services partition.

                                  - *

                                  You can also use backup copies to clone your file data set to another Region - * or within the same Region.

                                  + *

                                  You can use cross-Region backup copies for cross-Region disaster recovery. You can + * periodically take backups and copy them to another Region so that in the event of a + * disaster in the primary Region, you can restore from backup and recover availability + * quickly in the other Region. You can make cross-Region copies only within your Amazon Web Services partition. A partition is a grouping of Regions. Amazon Web Services currently + * has three partitions: aws (Standard Regions), aws-cn (China + * Regions), and aws-us-gov (Amazon Web Services GovCloud [US] Regions).

                                  + *

                                  You can also use backup copies to clone your file dataset to another Region or within + * the same Region.

                                  *

                                  You can use the SourceRegion parameter to specify the Amazon Web Services Region * from which the backup will be copied. For example, if you make the call from the * us-west-1 Region and want to copy a backup from the us-east-2 * Region, you specify us-east-2 in the SourceRegion parameter * to make a cross-Region copy. If you don't specify a Region, the backup copy is * created in the same Region where the request is sent from (in-Region copy).

                                  - *

                                  For more information on creating backup copies, see - * - * Copying backups in the Amazon FSx for Windows User Guide and - * Copying backups - * in the Amazon FSx for Lustre User Guide.

                                  + *

                                  For more information about creating backup copies, see Copying backups + * in the Amazon FSx for Windows User Guide, Copying backups in the Amazon FSx for Lustre User + * Guide, and Copying backups in the Amazon FSx for OpenZFS User + * Guide.
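 *
 * @example
 * A minimal cross-Region copy sketch, assuming a configured client in the destination Region
 * and a hypothetical source backup ID (run inside an async function):
 *
 *     import { FSxClient, CopyBackupCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-west-1" });
 *     // Copy a backup that lives in us-east-2 into us-west-1.
 *     const { Backup } = await fsx.send(
 *       new CopyBackupCommand({ SourceBackupId: "backup-0123456789abcdef0", SourceRegion: "us-east-2" })
 *     );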

                                  */ public copyBackup(args: CopyBackupCommandInput, options?: __HttpHandlerOptions): Promise; public copyBackup(args: CopyBackupCommandInput, cb: (err: any, data?: CopyBackupCommandOutput) => void): void; @@ -267,42 +317,44 @@ export class FSx extends FSxClient { } /** - *

                                  Creates a backup of an existing Amazon FSx for Windows File Server - * or Amazon FSx for Lustre file system, or of an Amazon FSx for NetApp ONTAP - * volume. Creating regular backups is a best practice, enabling you to restore - * a file system or volume from a backup if an issue arises with the original - * file system or volume.

                                  - *

                                  For Amazon FSx for Lustre file systems, you can create a backup only - * for file systems with the following configuration:

                                  + *

                                  Creates a backup of an existing Amazon FSx for Windows File Server file + * system, Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP + * volume, or Amazon FSx for OpenZFS file system. We recommend creating regular + * backups so that you can restore a file system or volume from a backup if an issue arises + * with the original file system or volume.

                                  + *

                                  For Amazon FSx for Lustre file systems, you can create a backup only for file + * systems that have the following configuration:

                                  *
                                    *
                                  • - *

                                    a Persistent deployment type

                                    + *

                                    A Persistent deployment type

                                    *
                                  • *
                                  • - *

                                    is not linked to a data repository.

                                    + *

                                    Are not linked to a data repository

                                    *
                                  • *
                                  *

                                  For more information about backups, see the following:

                                  * - *

                                  If a backup with the specified client request token exists, and the parameters - * match, this operation returns the description of the existing backup. If a backup - * specified client request token exists, and the parameters don't match, this - * operation returns IncompatibleParameterError. If a backup with the - * specified client request token doesn't exist, CreateBackup does the - * following:

                                  + *

                                  If a backup with the specified client request token exists and the parameters match, + * this operation returns the description of the existing backup. If a backup with the + * specified client request token exists and the parameters don't match, this operation + * returns IncompatibleParameterError. If a backup with the specified client + * request token doesn't exist, CreateBackup does the following:

                                  *
                                    *
                                  • *

                                    Creates a new Amazon FSx backup with an assigned ID, and an initial @@ -319,10 +371,10 @@ export class FSx extends FSxClient { * you use the same client request token and the initial call created a backup, the * operation returns a successful result because all the parameters are the same.

                                    * - *

                                    The CreateBackup operation returns while the backup's - * lifecycle state is still CREATING. You can check the backup creation - * status by calling the DescribeBackups operation, which returns the - * backup state along with other information.

                                    + *

                                    The CreateBackup operation returns while the backup's lifecycle state is + * still CREATING. You can check the backup creation status by calling the + * DescribeBackups operation, which returns the backup state along with other + * information.
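 *
 * @example
 * A minimal sketch of creating a backup and checking its lifecycle, assuming a configured
 * client and a hypothetical file system ID (run inside an async function):
 *
 *     import { FSxClient, CreateBackupCommand, DescribeBackupsCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     const { Backup } = await fsx.send(new CreateBackupCommand({ FileSystemId: "fs-0123456789abcdef0" }));
 *     // The backup starts in the CREATING state; check its progress with DescribeBackups.
 *     const { Backups } = await fsx.send(new DescribeBackupsCommand({ BackupIds: [Backup!.BackupId!] }));
 *     console.log(Backups?.[0]?.Lifecycle);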

                                    */ public createBackup( args: CreateBackupCommandInput, @@ -350,16 +402,59 @@ export class FSx extends FSxClient { } } + /** + *

                                    Creates an Amazon FSx for Lustre data repository association (DRA). A data + * repository association is a link between a directory on the file system and + * an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository + * associations on a file system. Data repository associations are supported only + * for file systems with the Persistent_2 deployment type.

                                    + *

                                    Each data repository association must have a unique Amazon FSx file + * system directory and a unique S3 bucket or prefix associated with it. You + * can configure a data repository association for automatic import only, + * for automatic export only, or for both. To learn more about linking a + * data repository to your file system, see + * Linking your file system to an S3 bucket.
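 *
 * @example
 * A minimal sketch that links a file system directory to an S3 prefix for both automatic
 * import and export; the file system ID, path, and bucket are hypothetical (run inside an
 * async function):
 *
 *     import { FSxClient, CreateDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     await fsx.send(new CreateDataRepositoryAssociationCommand({
 *       FileSystemId: "fs-0123456789abcdef0",
 *       FileSystemPath: "/ns1",
 *       DataRepositoryPath: "s3://amzn-s3-demo-bucket/prefix/",
 *       S3: {
 *         AutoImportPolicy: { Events: ["NEW", "CHANGED", "DELETED"] },
 *         AutoExportPolicy: { Events: ["NEW", "CHANGED", "DELETED"] },
 *       },
 *     }));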

                                    + */ + public createDataRepositoryAssociation( + args: CreateDataRepositoryAssociationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createDataRepositoryAssociation( + args: CreateDataRepositoryAssociationCommandInput, + cb: (err: any, data?: CreateDataRepositoryAssociationCommandOutput) => void + ): void; + public createDataRepositoryAssociation( + args: CreateDataRepositoryAssociationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateDataRepositoryAssociationCommandOutput) => void + ): void; + public createDataRepositoryAssociation( + args: CreateDataRepositoryAssociationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateDataRepositoryAssociationCommandOutput) => void), + cb?: (err: any, data?: CreateDataRepositoryAssociationCommandOutput) => void + ): Promise | void { + const command = new CreateDataRepositoryAssociationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                    Creates an Amazon FSx for Lustre data repository task. You use data repository tasks * to perform bulk operations between your Amazon FSx file system and its linked data - * repository. An example of a data repository task is - * exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A - * CreateDataRepositoryTask operation will fail if a data repository is not - * linked to the FSx file system. To learn more about data repository tasks, see + * repositories. An example of a data repository task is exporting any data and metadata + * changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) + * from your FSx file system to a linked data repository. A CreateDataRepositoryTask + * operation will fail if a data repository is not linked to the FSx file system. To learn + * more about data repository tasks, see * Data Repository Tasks. * To learn more about linking a data repository to your file system, see - * Linking your file system to an S3 bucket.

                                    + * Linking your file system to an S3 bucket.
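 *
 * @example
 * A minimal sketch that exports changed data and metadata under one path back to the linked
 * repository; the file system ID and path are hypothetical (run inside an async function):
 *
 *     import { FSxClient, CreateDataRepositoryTaskCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     await fsx.send(new CreateDataRepositoryTaskCommand({
 *       FileSystemId: "fs-0123456789abcdef0",
 *       Type: "EXPORT_TO_REPOSITORY",
 *       Paths: ["project-a"],
 *       Report: { Enabled: false },
 *     }));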

                                    */ public createDataRepositoryTask( args: CreateDataRepositoryTaskCommandInput, @@ -391,24 +486,19 @@ export class FSx extends FSxClient { } /** - *

                                    Creates a new, empty Amazon FSx file system.

                                    - * - *

                                    If a file system with the specified client request token exists and the parameters - * match, CreateFileSystem returns the description of the existing file - * system. If a file system specified client request token exists and the parameters - * don't match, this call returns IncompatibleParameterError. If a file - * system with the specified client request token doesn't exist, - * CreateFileSystem does the following:

                                    + *

                                    Creates a new, empty Amazon FSx file system. You can create the following supported + * Amazon FSx file systems using the CreateFileSystem API operation:

                                    *
                                      *
                                    • - *

                                      Creates a new, empty Amazon FSx file system with an assigned ID, and an - * initial lifecycle state of CREATING.

                                      + *

                                      Amazon FSx for Lustre

                                      *
                                    • *
                                    • - *

                                      Returns the description of the file system.

                                      + *

                                      Amazon FSx for NetApp ONTAP

                                      + *
                                    • + *
                                    • + *

                                      Amazon FSx for Windows File Server

                                      *
                                    • *
                                    - * *

                                    This operation requires a client request token in the request that Amazon FSx uses * to ensure idempotent creation. This means that calling the operation multiple times with * the same client request token has no effect. By using the idempotent operation, you can @@ -418,11 +508,36 @@ export class FSx extends FSxClient { * occurred, or your connection was reset. If you use the same client request token and the * initial call created a file system, the client receives success as long as the * parameters are the same.

                                    + *

                                    If a file system with the specified client request token exists and the parameters + * match, CreateFileSystem returns the description of the existing file + * system. If a file system with the specified client request token exists and the + * parameters don't match, this call returns IncompatibleParameterError. If a + * file system with the specified client request token doesn't exist, + * CreateFileSystem does the following:

                                    + *
                                      + *
                                    • + *

                                      Creates a new, empty Amazon FSx file system with an assigned ID, and + * an initial lifecycle state of CREATING.

                                      + *
                                    • + *
                                    • + *

                                      Returns the description of the file system.

                                      + *
                                    • + *
                                    + * + *

                                    This operation requires a client request token in the request that Amazon FSx + * uses to ensure idempotent creation. This means that calling the operation multiple times + * with the same client request token has no effect. By using the idempotent operation, you + * can retry a CreateFileSystem operation without the risk of creating an + * extra file system. This approach can be useful when an initial call fails in a way that + * makes it unclear whether a file system was created. Examples are if a transport-level + * timeout occurred, or your connection was reset. If you use the same client request token + * and the initial call created a file system, the client receives a success message as + * long as the parameters are the same.

                                    * - *

                                    The CreateFileSystem call returns while the file system's - * lifecycle state is still CREATING. You can check the file-system - * creation status by calling the DescribeFileSystems operation, - * which returns the file system state along with other information.

                                    + *

                                    The CreateFileSystem call returns while the file system's lifecycle + * state is still CREATING. You can check the file-system creation status + * by calling the DescribeFileSystems operation, which returns the file system state + * along with other information.
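 *
 * @example
 * A minimal sketch that creates a Persistent_2 Lustre file system; the subnet ID is
 * hypothetical and the sizing values are only illustrative (run inside an async function):
 *
 *     import { FSxClient, CreateFileSystemCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     const { FileSystem } = await fsx.send(new CreateFileSystemCommand({
 *       FileSystemType: "LUSTRE",
 *       StorageCapacity: 1200,
 *       SubnetIds: ["subnet-0123456789abcdef0"],
 *       LustreConfiguration: { DeploymentType: "PERSISTENT_2", PerUnitStorageThroughput: 125 },
 *     }));
 *     console.log(FileSystem?.Lifecycle); // CREATING until the file system is ready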

                                    *
                                    */ public createFileSystem( @@ -455,26 +570,26 @@ export class FSx extends FSxClient { } /** - *

                                    Creates a new Amazon FSx for Lustre or Amazon FSx for Windows File Server file system - * from an existing Amazon FSx backup.

                                    + *

                                    Creates a new Amazon FSx for Lustre, Amazon FSx for Windows File + * Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.

                                    * *

If a file system with the specified client request token exists and the parameters - * match, this operation returns the description of the file system. If a file system with the + * specified client request token exists and the parameters don't match, this + * call returns IncompatibleParameterError. If a file system with the + * specified client request token doesn't exist, this operation does the following:

                                    * *
                                      *
                                    • - *

                                      Creates a new Amazon FSx file system from backup with an assigned ID, and - * an initial lifecycle state of CREATING.

                                      + *

                                      Creates a new Amazon FSx file system from backup with an assigned ID, + * and an initial lifecycle state of CREATING.

                                      *
                                    • *
                                    • *

                                      Returns the description of the file system.

                                      *
                                    • *
                                    * - *

                                    Parameters like Active Directory, default share name, automatic backup, and backup + *

                                    Parameters like the Active Directory, default share name, automatic backup, and backup * settings default to the parameters of the file system that was backed up, unless * overridden. You can explicitly supply other settings.

                                    * @@ -483,14 +598,14 @@ export class FSx extends FSxClient { * file system. This approach can be useful when an initial call fails in a way that makes * it unclear whether a file system was created. Examples are if a transport level timeout * occurred, or your connection was reset. If you use the same client request token and the - * initial call created a file system, the client receives success as long as the + * initial call created a file system, the client receives a success message as long as the * parameters are the same.

                                    * - *

                                    The CreateFileSystemFromBackup call returns while the file - * system's lifecycle state is still CREATING. You can check the - * file-system creation status by calling the DescribeFileSystems - * operation, which returns the file system state along with other - * information.

                                    + *

                                    The CreateFileSystemFromBackup call returns while the file system's + * lifecycle state is still CREATING. You can check the file-system + * creation status by calling the + * DescribeFileSystems operation, which returns the file system state along + * with other information.
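 *
 * @example
 * A minimal restore-from-backup sketch with hypothetical backup and subnet IDs
 * (run inside an async function):
 *
 *     import { FSxClient, CreateFileSystemFromBackupCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     const { FileSystem } = await fsx.send(new CreateFileSystemFromBackupCommand({
 *       BackupId: "backup-0123456789abcdef0",
 *       SubnetIds: ["subnet-0123456789abcdef0"],
 *     }));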

                                    *
                                    */ public createFileSystemFromBackup( @@ -522,6 +637,65 @@ export class FSx extends FSxClient { } } + /** + *

                                    Creates a snapshot of an existing Amazon FSx for OpenZFS file system. With + * snapshots, you can easily undo file changes and compare file versions by restoring the + * volume to a previous version.

                                    + *

                                    If a snapshot with the specified client request token exists, and the parameters + * match, this operation returns the description of the existing snapshot. If a snapshot + * with the specified client request token exists, and the parameters don't match, this + * operation returns IncompatibleParameterError. If a snapshot with the + * specified client request token doesn't exist, CreateSnapshot does the + * following:

                                    + *
                                      + *
                                    • + *

                                      Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle + * state of CREATING.

                                      + *
                                    • + *
                                    • + *

                                      Returns the description of the snapshot.

                                      + *
                                    • + *
                                    + *

                                    By using the idempotent operation, you can retry a CreateSnapshot + * operation without the risk of creating an extra snapshot. This approach can be useful + * when an initial call fails in a way that makes it unclear whether a snapshot was + * created. If you use the same client request token and the initial call created a + * snapshot, the operation returns a successful result because all the parameters are the + * same.

                                    + *

                                    The CreateSnapshot operation returns while the snapshot's lifecycle state + * is still CREATING. You can check the snapshot creation status by calling + * the DescribeSnapshots operation, which returns the snapshot state along with + * other information.
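 *
 * @example
 * A minimal sketch that snapshots an OpenZFS volume and then reads the snapshot state; the
 * volume ID is hypothetical (run inside an async function):
 *
 *     import { FSxClient, CreateSnapshotCommand, DescribeSnapshotsCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     const { Snapshot } = await fsx.send(
 *       new CreateSnapshotCommand({ Name: "nightly-2021-11-30", VolumeId: "fsvol-0123456789abcdef0" })
 *     );
 *     const { Snapshots } = await fsx.send(new DescribeSnapshotsCommand({ SnapshotIds: [Snapshot!.SnapshotId!] }));
 *     console.log(Snapshots?.[0]?.Lifecycle);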

                                    + */ + public createSnapshot( + args: CreateSnapshotCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createSnapshot( + args: CreateSnapshotCommandInput, + cb: (err: any, data?: CreateSnapshotCommandOutput) => void + ): void; + public createSnapshot( + args: CreateSnapshotCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateSnapshotCommandOutput) => void + ): void; + public createSnapshot( + args: CreateSnapshotCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateSnapshotCommandOutput) => void), + cb?: (err: any, data?: CreateSnapshotCommandOutput) => void + ): Promise | void { + const command = new CreateSnapshotCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                    Creates a storage virtual machine (SVM) for an Amazon FSx for ONTAP file system.

                                    */ @@ -555,7 +729,8 @@ export class FSx extends FSxClient { } /** - *

                                    Creates an Amazon FSx for NetApp ONTAP storage volume.

                                    + *

                                    Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage + * volume.

                                    */ public createVolume( args: CreateVolumeCommandInput, @@ -617,10 +792,11 @@ export class FSx extends FSxClient { } /** - *

                                    Deletes an Amazon FSx backup, deleting its contents. After deletion, the backup no longer exists, and its data is gone.

                                    + *

                                    Deletes an Amazon FSx backup. After deletion, the backup no longer exists, and + * its data is gone.

                                    * - *

                                    The DeleteBackup call returns instantly. The backup will not show up - * in later DescribeBackups calls.

                                    + *

                                    The DeleteBackup call returns instantly. The backup won't show up in + * later DescribeBackups calls.
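 *
 * @example
 * A minimal sketch with a hypothetical backup ID (run inside an async function):
 *
 *     import { FSxClient, DeleteBackupCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     const { BackupId, Lifecycle } = await fsx.send(
 *       new DeleteBackupCommand({ BackupId: "backup-0123456789abcdef0" })
 *     );
 *     console.log(BackupId, Lifecycle); // Lifecycle is DELETED once the request is accepted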

                                    * * *

                                    The data in a deleted backup is also deleted and can't be recovered by any @@ -654,27 +830,64 @@ export class FSx extends FSxClient { } /** - *

                                    Deletes a file system, deleting its contents. After deletion, the file system no - * longer exists, and its data is gone. Any existing automatic backups will also be - * deleted.

                                    - *

                                    To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes - * and SVMs on the file system. Then provide a FileSystemId value to the - * DeleFileSystem operation.

                                    + *

                                    Deletes a data repository association on an Amazon FSx for Lustre + * file system. Deleting the data repository association unlinks the + * file system from the Amazon S3 bucket. When deleting a data repository + * association, you have the option of deleting the data in the file system + * that corresponds to the data repository association. Data repository + * associations are supported only for file systems with the + * Persistent_2 deployment type.
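 *
 * @example
 * A minimal sketch that unlinks an association while keeping the data already imported into
 * the file system; the association ID is hypothetical (run inside an async function):
 *
 *     import { FSxClient, DeleteDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     await fsx.send(new DeleteDataRepositoryAssociationCommand({
 *       AssociationId: "dra-0123456789abcdef0",
 *       DeleteDataInFileSystem: false,
 *     }));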

                                    + */ + public deleteDataRepositoryAssociation( + args: DeleteDataRepositoryAssociationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteDataRepositoryAssociation( + args: DeleteDataRepositoryAssociationCommandInput, + cb: (err: any, data?: DeleteDataRepositoryAssociationCommandOutput) => void + ): void; + public deleteDataRepositoryAssociation( + args: DeleteDataRepositoryAssociationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteDataRepositoryAssociationCommandOutput) => void + ): void; + public deleteDataRepositoryAssociation( + args: DeleteDataRepositoryAssociationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteDataRepositoryAssociationCommandOutput) => void), + cb?: (err: any, data?: DeleteDataRepositoryAssociationCommandOutput) => void + ): Promise | void { + const command = new DeleteDataRepositoryAssociationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                    Deletes a file system. After deletion, the file system no longer exists, and its data + * is gone. Any existing automatic backups and snapshots are also deleted.

                                    + *

To delete an Amazon FSx for NetApp ONTAP file system, first delete all the + * volumes and storage virtual machines (SVMs) on the file system. Then provide a + * FileSystemId value to the DeleteFileSystem operation.

                                    * - *

                                    By default, when you delete an Amazon FSx for Windows File Server file system, a - * final backup is created upon deletion. This final backup is not subject to the file + *

                                    By default, when you delete an Amazon FSx for Windows File Server file system, + * a final backup is created upon deletion. This final backup isn't subject to the file * system's retention policy, and must be manually deleted.

                                    * - *

                                    The DeleteFileSystem action returns while the file system has the + *

                                    The DeleteFileSystem operation returns while the file system has the * DELETING status. You can check the file system deletion status by - * calling the DescribeFileSystems action, which returns a list of file - * systems in your account. If you pass the file system ID for a deleted file system, the - * DescribeFileSystems returns a FileSystemNotFound + * calling the DescribeFileSystems operation, which returns a list of file systems in your + * account. If you pass the file system ID for a deleted file system, the + * DescribeFileSystems operation returns a FileSystemNotFound * error.

                                    * - *

                                    Deleting an Amazon FSx for Lustre file system will fail with a 400 BadRequest if - * a data repository task is in a PENDING or EXECUTING - * state.

                                    + *

                                    If a data repository task is in a PENDING or EXECUTING state, + * deleting an Amazon FSx for Lustre file system will fail with an HTTP status + * code 400 (Bad Request).
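 *
 * @example
 * A minimal sketch that deletes a Windows file system and skips the final backup; the file
 * system ID is hypothetical (run inside an async function):
 *
 *     import { FSxClient, DeleteFileSystemCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     const { Lifecycle } = await fsx.send(new DeleteFileSystemCommand({
 *       FileSystemId: "fs-0123456789abcdef0",
 *       WindowsConfiguration: { SkipFinalBackup: true },
 *     }));
 *     console.log(Lifecycle); // DELETING while the deletion is in progress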

                                    *
                                    * *

                                    The data in a deleted file system is also deleted and can't be recovered by @@ -710,6 +923,42 @@ export class FSx extends FSxClient { } } + /** + *

                                    Deletes the Amazon FSx snapshot. After deletion, the snapshot no longer + * exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a + * file system backup.

                                    + *

                                    The DeleteSnapshot operation returns instantly. The snapshot appears with + * the lifecycle status of DELETING until the deletion is complete.

                                    + */ + public deleteSnapshot( + args: DeleteSnapshotCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteSnapshot( + args: DeleteSnapshotCommandInput, + cb: (err: any, data?: DeleteSnapshotCommandOutput) => void + ): void; + public deleteSnapshot( + args: DeleteSnapshotCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteSnapshotCommandOutput) => void + ): void; + public deleteSnapshot( + args: DeleteSnapshotCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteSnapshotCommandOutput) => void), + cb?: (err: any, data?: DeleteSnapshotCommandOutput) => void + ): Promise | void { + const command = new DeleteSnapshotCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                    Deletes an existing Amazon FSx for ONTAP storage virtual machine (SVM). Prior * to deleting an SVM, you must delete all non-root volumes in the SVM, otherwise the operation will fail.

                                    @@ -744,10 +993,8 @@ export class FSx extends FSxClient { } /** - *

                                    Deletes an Amazon FSx for NetApp ONTAP volume. When deleting a volume, - * you have the option of creating a final backup. If you create a final backup, you have the option to - * apply Tags to the backup. You need to have fsx:TagResource - * permission in order to apply tags to the backup.

                                    + *

                                    Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS + * volume.

                                    */ public deleteVolume( args: DeleteVolumeCommandInput, @@ -776,34 +1023,33 @@ export class FSx extends FSxClient { } /** - *

                                    Returns the description of specific Amazon FSx backups, if - * a BackupIds value is provided for that backup. Otherwise, it returns all - * backups owned by your Amazon Web Services account in the Amazon Web Services Region - * of the endpoint that you're calling.

                                    + *

                                    Returns the description of a specific Amazon FSx backup, if a + * BackupIds value is provided for that backup. Otherwise, it returns all + * backups owned by your Amazon Web Services account in the Amazon Web Services Region of the + * endpoint that you're calling.

                                    * *

                                    When retrieving all backups, you can optionally specify the MaxResults - * parameter to limit the number of backups in a response. If more backups remain, Amazon - * FSx returns a NextToken value in the response. In this case, send a later - * request with the NextToken request parameter set to the value of - * NextToken from the last response.

                                    + * parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case, + * send a later request with the NextToken request parameter set to the value + * of the NextToken value from the last response.

                                    * - *

                                    This action is used in an iterative process to retrieve a list of your backups. - * DescribeBackups is called first without a NextTokenvalue. - * Then the action continues to be called with the NextToken parameter set to - * the value of the last NextToken value until a response has no - * NextToken.

                                    + *

                                    This operation is used in an iterative process to retrieve a list of your backups. + * DescribeBackups is called first without a NextToken value. + * Then the operation continues to be called with the NextToken parameter set + * to the value of the last NextToken value until a response has no + * NextToken value.
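 *
 * @example
 * A minimal manual-pagination sketch that follows NextToken until it is absent
 * (run inside an async function):
 *
 *     import { FSxClient, DescribeBackupsCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     let NextToken: string | undefined;
 *     do {
 *       const page = await fsx.send(new DescribeBackupsCommand({ MaxResults: 25, NextToken }));
 *       for (const backup of page.Backups ?? []) console.log(backup.BackupId, backup.Lifecycle);
 *       NextToken = page.NextToken;
 *     } while (NextToken);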

                                    * - *

                                    When using this action, keep the following in mind:

                                    + *

                                    When using this operation, keep the following in mind:

                                    *
                                      *
                                    • - *

                                      The implementation might return fewer than MaxResults + *

                                      The operation might return fewer than the MaxResults value of * backup descriptions while still including a NextToken * value.

                                      *
                                    • *
                                    • - *

                                      The order of backups returned in the response of one - * DescribeBackups call and the order of backups returned across - * the responses of a multi-call iteration is unspecified.

                                      + *

                                      The order of the backups returned in the response of one + * DescribeBackups call and the order of the backups returned + * across the responses of a multi-call iteration is unspecified.

                                      *
                                    • *
                                    */ @@ -836,6 +1082,56 @@ export class FSx extends FSxClient { } } + /** + *

                                    Returns the description of specific Amazon FSx for Lustre data repository associations, if + * one or more AssociationIds values are provided in the request, or if filters are + * used in the request. Data repository associations are supported only + * for file systems with the Persistent_2 deployment type.

                                    + * + *

                                    You can use filters to narrow the response to include just data repository + * associations for specific file systems (use the file-system-id filter with + * the ID of the file system) or data repository associations for a specific repository type + * (use the data-repository-type filter with a value of S3). + * If you don't use filters, the response returns all data repository associations + * owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint + * that you're calling.

                                    + * + *

                                    When retrieving all data repository associations, you can paginate the response by using + * the optional MaxResults parameter to limit the number of data repository associations + * returned in a response. If more data repository associations remain, Amazon FSx returns a + * NextToken value in the response. In this case, send a later + * request with the NextToken request parameter set to the value of + * NextToken from the last response.
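 *
 * @example
 * A minimal sketch that lists the associations of one file system by using the
 * file-system-id filter; the file system ID is hypothetical (run inside an async function):
 *
 *     import { FSxClient, DescribeDataRepositoryAssociationsCommand } from "@aws-sdk/client-fsx";
 *     const fsx = new FSxClient({ region: "us-east-1" });
 *     const { Associations } = await fsx.send(new DescribeDataRepositoryAssociationsCommand({
 *       Filters: [{ Name: "file-system-id", Values: ["fs-0123456789abcdef0"] }],
 *     }));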

                                    + */ + public describeDataRepositoryAssociations( + args: DescribeDataRepositoryAssociationsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeDataRepositoryAssociations( + args: DescribeDataRepositoryAssociationsCommandInput, + cb: (err: any, data?: DescribeDataRepositoryAssociationsCommandOutput) => void + ): void; + public describeDataRepositoryAssociations( + args: DescribeDataRepositoryAssociationsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeDataRepositoryAssociationsCommandOutput) => void + ): void; + public describeDataRepositoryAssociations( + args: DescribeDataRepositoryAssociationsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeDataRepositoryAssociationsCommandOutput) => void), + cb?: (err: any, data?: DescribeDataRepositoryAssociationsCommandOutput) => void + ): Promise | void { + const command = new DescribeDataRepositoryAssociationsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                    Returns the description of specific Amazon FSx for Lustre data repository tasks, if * one or more TaskIds values are provided in the request, or if filters are used in the request. @@ -915,23 +1211,23 @@ export class FSx extends FSxClient { /** *

   * Returns the description of specific Amazon FSx file systems, if a
   * FileSystemIds value is provided for that file system. Otherwise, it
-  * returns descriptions of all file systems owned by your Amazon Web Services account in
-  * the Amazon Web Services Region of the endpoint that you're calling.
+  * returns descriptions of all file systems owned by your Amazon Web Services account in the
+  * Amazon Web Services Region of the endpoint that you're calling.
   *
   * When retrieving all file system descriptions, you can optionally specify the
   * MaxResults parameter to limit the number of descriptions in a response.
-  * If more file system descriptions remain, Amazon FSx returns a NextToken
-  * value in the response. In this case, send a later request with the
-  * NextToken request parameter set to the value of NextToken
-  * from the last response.
+  * If more file system descriptions remain, Amazon FSx returns a
+  * NextToken value in the response. In this case, send a later request
+  * with the NextToken request parameter set to the value of
+  * NextToken from the last response.
   *
-  * This action is used in an iterative process to retrieve a list of your file system
+  * This operation is used in an iterative process to retrieve a list of your file system
   * descriptions. DescribeFileSystems is called first without a
-  * NextToken value. Then the action continues to be called with the
+  * NextToken value. Then the operation continues to be called with the
   * NextToken parameter set to the value of the last NextToken
   * value until a response has no NextToken.
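A compact sketch of that iterative retrieval. It assumes the generated paginator helper `paginateDescribeFileSystems` is exported by the installed package version; a manual `NextToken` loop works the same way.

```ts
import { FSxClient, paginateDescribeFileSystems } from "@aws-sdk/client-fsx";

// Walk every page of DescribeFileSystems; the paginator keeps calling the
// operation with the previous NextToken until a response has none.
async function listAllFileSystemIds(client: FSxClient): Promise<string[]> {
  const ids: string[] = [];
  for await (const page of paginateDescribeFileSystems({ client }, {})) {
    for (const fs of page.FileSystems ?? []) {
      if (fs.FileSystemId) {
        ids.push(fs.FileSystemId);
      }
    }
  }
  return ids;
}
```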

   *
-  * When using this action, keep the following in mind:
+  * When using this operation, keep the following in mind:

                                    *
                                      *
                                    • *

                                      The implementation might return fewer than MaxResults file @@ -975,6 +1271,64 @@ export class FSx extends FSxClient { } } + /** + *

                                      Returns the description of specific Amazon FSx snapshots, if a + * SnapshotIds value is provided. Otherwise, this operation returns all + * snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of + * the endpoint that you're calling.

                                      + *

+  * When retrieving all snapshots, you can optionally specify the MaxResults
+  * parameter to limit the number of snapshots in a response. If more snapshots remain,
+  * Amazon FSx returns a NextToken value in the response. In this
+  * case, send a later request with the NextToken request parameter set to the
+  * value of NextToken from the last response.

                                      + *

                                      Use this operation in an iterative process to retrieve a list of your snapshots. + * DescribeSnapshots is called first without a NextToken + * value. Then the operation continues to be called with the NextToken + * parameter set to the value of the last NextToken value until a response has + * no NextToken value.

                                      + *

                                      When using this operation, keep the following in mind:

                                      + *
                                        + *
                                      • + *

                                        The operation might return fewer than the MaxResults value of + * snapshot descriptions while still including a NextToken + * value.

                                        + *
                                      • + *
                                      • + *

+  * The order of snapshots returned in the response of one
+  * DescribeSnapshots call and the order of snapshots returned across
+  * the responses of a multi-call iteration is unspecified.

                                        + *
                                      • + *
                                      + */ + public describeSnapshots( + args: DescribeSnapshotsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeSnapshots( + args: DescribeSnapshotsCommandInput, + cb: (err: any, data?: DescribeSnapshotsCommandOutput) => void + ): void; + public describeSnapshots( + args: DescribeSnapshotsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeSnapshotsCommandOutput) => void + ): void; + public describeSnapshots( + args: DescribeSnapshotsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeSnapshotsCommandOutput) => void), + cb?: (err: any, data?: DescribeSnapshotsCommandOutput) => void + ): Promise | void { + const command = new DescribeSnapshotsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                      Describes one or more Amazon FSx for NetApp ONTAP storage virtual machines (SVMs).

   */
@@ -1008,7 +1362,8 @@ export class FSx extends FSxClient {
   }
 
   /**
-   * Describes one or more Amazon FSx for NetApp ONTAP volumes.
+   * Describes one or more Amazon FSx for NetApp ONTAP or Amazon FSx for
+   * OpenZFS volumes.
    */
   public describeVolumes(
     args: DescribeVolumesCommandInput,
@@ -1139,6 +1494,72 @@ export class FSx extends FSxClient {
     }
   }
 
+  /**
+   *

                                      Releases the file system lock from an Amazon FSx for OpenZFS file + * system.

                                      + */ + public releaseFileSystemNfsV3Locks( + args: ReleaseFileSystemNfsV3LocksCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public releaseFileSystemNfsV3Locks( + args: ReleaseFileSystemNfsV3LocksCommandInput, + cb: (err: any, data?: ReleaseFileSystemNfsV3LocksCommandOutput) => void + ): void; + public releaseFileSystemNfsV3Locks( + args: ReleaseFileSystemNfsV3LocksCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ReleaseFileSystemNfsV3LocksCommandOutput) => void + ): void; + public releaseFileSystemNfsV3Locks( + args: ReleaseFileSystemNfsV3LocksCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ReleaseFileSystemNfsV3LocksCommandOutput) => void), + cb?: (err: any, data?: ReleaseFileSystemNfsV3LocksCommandOutput) => void + ): Promise | void { + const command = new ReleaseFileSystemNfsV3LocksCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                      Returns an Amazon FSx for OpenZFS volume to the state saved by the specified + * snapshot.
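A hedged sketch of such a restore call. The `VolumeId`, `SnapshotId`, and `Options` members (and the option value shown) are assumptions about the generated request shape; the IDs are placeholders.

```ts
import { FSxClient, RestoreVolumeFromSnapshotCommand } from "@aws-sdk/client-fsx";

// Roll a volume back to a snapshot, deleting any snapshots taken after it.
async function rollBackVolume(client: FSxClient): Promise<void> {
  const response = await client.send(
    new RestoreVolumeFromSnapshotCommand({
      VolumeId: "fsvol-0123456789abcdef0", // placeholder
      SnapshotId: "fsvolsnap-0123456789abcdef0", // placeholder
      Options: ["DELETE_INTERMEDIATE_SNAPSHOTS"],
    })
  );
  console.log(response.VolumeId, response.Lifecycle);
}
```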

                                      + */ + public restoreVolumeFromSnapshot( + args: RestoreVolumeFromSnapshotCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public restoreVolumeFromSnapshot( + args: RestoreVolumeFromSnapshotCommandInput, + cb: (err: any, data?: RestoreVolumeFromSnapshotCommandOutput) => void + ): void; + public restoreVolumeFromSnapshot( + args: RestoreVolumeFromSnapshotCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: RestoreVolumeFromSnapshotCommandOutput) => void + ): void; + public restoreVolumeFromSnapshot( + args: RestoreVolumeFromSnapshotCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: RestoreVolumeFromSnapshotCommandOutput) => void), + cb?: (err: any, data?: RestoreVolumeFromSnapshotCommandOutput) => void + ): Promise | void { + const command = new RestoreVolumeFromSnapshotCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                      Tags an Amazon FSx resource.

                                      */ @@ -1198,69 +1619,176 @@ export class FSx extends FSxClient { } /** - *

                                      Use this operation to update the configuration of an existing Amazon FSx file system. - * You can update multiple properties in a single request.

                                      + *

                                      Updates the configuration of an existing data repository association + * on an Amazon FSx for Lustre file system. Data repository associations are + * supported only for file systems with the Persistent_2 deployment type.

                                      + */ + public updateDataRepositoryAssociation( + args: UpdateDataRepositoryAssociationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateDataRepositoryAssociation( + args: UpdateDataRepositoryAssociationCommandInput, + cb: (err: any, data?: UpdateDataRepositoryAssociationCommandOutput) => void + ): void; + public updateDataRepositoryAssociation( + args: UpdateDataRepositoryAssociationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateDataRepositoryAssociationCommandOutput) => void + ): void; + public updateDataRepositoryAssociation( + args: UpdateDataRepositoryAssociationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateDataRepositoryAssociationCommandOutput) => void), + cb?: (err: any, data?: UpdateDataRepositoryAssociationCommandOutput) => void + ): Promise | void { + const command = new UpdateDataRepositoryAssociationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                      Use this operation to update the configuration of an existing Amazon FSx file + * system. You can update multiple properties in a single request.

                                      *

                                      For Amazon FSx for Windows File Server file systems, you can update the following - * properties:

                                      + * properties:

                                      *
                                        *
                                      • - *

                                        AuditLogConfiguration

                                        + *

                                        + * AuditLogConfiguration + *

                                        *
                                      • *
                                      • - *

                                        AutomaticBackupRetentionDays

                                        + *

                                        + * AutomaticBackupRetentionDays + *

                                        *
                                      • *
                                      • - *

                                        DailyAutomaticBackupStartTime

                                        + *

                                        + * DailyAutomaticBackupStartTime + *

                                        *
                                      • *
                                      • - *

                                        SelfManagedActiveDirectoryConfiguration

                                        + *

                                        + * SelfManagedActiveDirectoryConfiguration + *

                                        *
                                      • *
                                      • - *

                                        StorageCapacity

                                        + *

                                        + * StorageCapacity + *

                                        *
                                      • *
                                      • - *

                                        ThroughputCapacity

                                        + *

                                        + * ThroughputCapacity + *

                                        *
                                      • *
                                      • - *

                                        WeeklyMaintenanceStartTime

                                        + *

                                        + * WeeklyMaintenanceStartTime + *

                                        *
                                      • *
                                      - *

                                      For Amazon FSx for Lustre file systems, you can update the following + *

                                      For FSx for Lustre file systems, you can update the following * properties:

                                      *
                                        *
                                      • - *

                                        AutoImportPolicy

                                        + *

                                        + * AutoImportPolicy + *

                                        + *
                                      • + *
                                      • + *

                                        + * AutomaticBackupRetentionDays + *

                                        + *
                                      • + *
                                      • + *

                                        + * DailyAutomaticBackupStartTime + *

                                        + *
                                      • + *
                                      • + *

                                        + * DataCompressionType + *

                                        + *
                                      • + *
                                      • + *

                                        + * StorageCapacity + *

                                        *
                                      • *
                                      • - *

                                        AutomaticBackupRetentionDays

                                        + *

                                        + * WeeklyMaintenanceStartTime + *

                                        *
                                      • + *
                                      + *

                                      For FSx for ONTAP file systems, you can update the following + * properties:

                                      + *
                                        *
                                      • - *

                                        DailyAutomaticBackupStartTime

                                        + *

                                        + * AutomaticBackupRetentionDays + *

                                        *
                                      • *
                                      • - *

                                        DataCompressionType

                                        + *

                                        + * DailyAutomaticBackupStartTime + *

                                        *
                                      • *
                                      • - *

                                        StorageCapacity

                                        + *

                                        + * FsxAdminPassword + *

                                        *
                                      • *
                                      • - *

                                        WeeklyMaintenanceStartTime

                                        + *

                                        + * WeeklyMaintenanceStartTime + *

                                        *
                                      • *
                                      - *

                                      For Amazon FSx for NetApp ONTAP file systems, you can update the following + *

+  * For Amazon FSx for OpenZFS file systems, you can update the following
   * properties:

                                      *
                                        *
                                      • - *

                                        AutomaticBackupRetentionDays

                                        + *

                                        + * AutomaticBackupRetentionDays + *

                                        *
                                      • *
                                      • - *

                                        DailyAutomaticBackupStartTime

                                        + *

                                        + * CopyTagsToBackups + *

                                        *
                                      • *
                                      • - *

                                        FsxAdminPassword

                                        + *

                                        + * CopyTagsToVolumes + *

                                        *
                                      • *
                                      • - *

                                        WeeklyMaintenanceStartTime

                                        + *

                                        + * DailyAutomaticBackupStartTime + *

                                        + *
                                      • + *
                                      • + *

                                        + * DiskIopsConfiguration + *

                                        + *
                                      • + *
                                      • + *

                                        + * ThroughputCapacity + *

                                        + *
                                      • + *
                                      • + *

                                        + * WeeklyMaintenanceStartTime + *

                                        *
                                      • *
                                      */ @@ -1293,6 +1821,38 @@ export class FSx extends FSxClient { } } + /** + *
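A minimal sketch of an UpdateFileSystem call that touches two of the Windows properties listed above (ThroughputCapacity and WeeklyMaintenanceStartTime). The nested configuration member names follow the shapes generated for this client; the file system ID and values are placeholders.

```ts
import { FSxClient, UpdateFileSystemCommand } from "@aws-sdk/client-fsx";

// Bump throughput and move the weekly maintenance window for a
// Windows File Server file system (placeholder ID and values).
async function updateWindowsFileSystem(client: FSxClient): Promise<void> {
  const { FileSystem } = await client.send(
    new UpdateFileSystemCommand({
      FileSystemId: "fs-0123456789abcdef0",
      WindowsConfiguration: {
        ThroughputCapacity: 64,
        WeeklyMaintenanceStartTime: "1:05:00", // day:HH:MM, Monday 05:00 UTC
      },
    })
  );
  console.log(FileSystem?.FileSystemId, "update requested");
}
```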

                                      Updates the name of a snapshot.

                                      + */ + public updateSnapshot( + args: UpdateSnapshotCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateSnapshot( + args: UpdateSnapshotCommandInput, + cb: (err: any, data?: UpdateSnapshotCommandOutput) => void + ): void; + public updateSnapshot( + args: UpdateSnapshotCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateSnapshotCommandOutput) => void + ): void; + public updateSnapshot( + args: UpdateSnapshotCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateSnapshotCommandOutput) => void), + cb?: (err: any, data?: UpdateSnapshotCommandOutput) => void + ): Promise | void { + const command = new UpdateSnapshotCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                      Updates an Amazon FSx for ONTAP storage virtual machine (SVM).

                                      */ @@ -1326,7 +1886,7 @@ export class FSx extends FSxClient { } /** - *

                                      Updates an Amazon FSx for NetApp ONTAP volume's configuration.

                                      + *

                                      Updates the configuration of an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.

                                      */ public updateVolume( args: UpdateVolumeCommandInput, diff --git a/clients/client-fsx/src/FSxClient.ts b/clients/client-fsx/src/FSxClient.ts index 2a054f975bae..3564b4491b03 100644 --- a/clients/client-fsx/src/FSxClient.ts +++ b/clients/client-fsx/src/FSxClient.ts @@ -59,6 +59,10 @@ import { } from "./commands/CancelDataRepositoryTaskCommand"; import { CopyBackupCommandInput, CopyBackupCommandOutput } from "./commands/CopyBackupCommand"; import { CreateBackupCommandInput, CreateBackupCommandOutput } from "./commands/CreateBackupCommand"; +import { + CreateDataRepositoryAssociationCommandInput, + CreateDataRepositoryAssociationCommandOutput, +} from "./commands/CreateDataRepositoryAssociationCommand"; import { CreateDataRepositoryTaskCommandInput, CreateDataRepositoryTaskCommandOutput, @@ -68,6 +72,7 @@ import { CreateFileSystemFromBackupCommandInput, CreateFileSystemFromBackupCommandOutput, } from "./commands/CreateFileSystemFromBackupCommand"; +import { CreateSnapshotCommandInput, CreateSnapshotCommandOutput } from "./commands/CreateSnapshotCommand"; import { CreateStorageVirtualMachineCommandInput, CreateStorageVirtualMachineCommandOutput, @@ -78,13 +83,22 @@ import { CreateVolumeFromBackupCommandOutput, } from "./commands/CreateVolumeFromBackupCommand"; import { DeleteBackupCommandInput, DeleteBackupCommandOutput } from "./commands/DeleteBackupCommand"; +import { + DeleteDataRepositoryAssociationCommandInput, + DeleteDataRepositoryAssociationCommandOutput, +} from "./commands/DeleteDataRepositoryAssociationCommand"; import { DeleteFileSystemCommandInput, DeleteFileSystemCommandOutput } from "./commands/DeleteFileSystemCommand"; +import { DeleteSnapshotCommandInput, DeleteSnapshotCommandOutput } from "./commands/DeleteSnapshotCommand"; import { DeleteStorageVirtualMachineCommandInput, DeleteStorageVirtualMachineCommandOutput, } from "./commands/DeleteStorageVirtualMachineCommand"; import { DeleteVolumeCommandInput, DeleteVolumeCommandOutput } from "./commands/DeleteVolumeCommand"; import { DescribeBackupsCommandInput, DescribeBackupsCommandOutput } from "./commands/DescribeBackupsCommand"; +import { + DescribeDataRepositoryAssociationsCommandInput, + DescribeDataRepositoryAssociationsCommandOutput, +} from "./commands/DescribeDataRepositoryAssociationsCommand"; import { DescribeDataRepositoryTasksCommandInput, DescribeDataRepositoryTasksCommandOutput, @@ -97,6 +111,7 @@ import { DescribeFileSystemsCommandInput, DescribeFileSystemsCommandOutput, } from "./commands/DescribeFileSystemsCommand"; +import { DescribeSnapshotsCommandInput, DescribeSnapshotsCommandOutput } from "./commands/DescribeSnapshotsCommand"; import { DescribeStorageVirtualMachinesCommandInput, DescribeStorageVirtualMachinesCommandOutput, @@ -110,9 +125,22 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "./commands/ListTagsForResourceCommand"; +import { + ReleaseFileSystemNfsV3LocksCommandInput, + ReleaseFileSystemNfsV3LocksCommandOutput, +} from "./commands/ReleaseFileSystemNfsV3LocksCommand"; +import { + RestoreVolumeFromSnapshotCommandInput, + RestoreVolumeFromSnapshotCommandOutput, +} from "./commands/RestoreVolumeFromSnapshotCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { + UpdateDataRepositoryAssociationCommandInput, + UpdateDataRepositoryAssociationCommandOutput, +} 
from "./commands/UpdateDataRepositoryAssociationCommand"; import { UpdateFileSystemCommandInput, UpdateFileSystemCommandOutput } from "./commands/UpdateFileSystemCommand"; +import { UpdateSnapshotCommandInput, UpdateSnapshotCommandOutput } from "./commands/UpdateSnapshotCommand"; import { UpdateStorageVirtualMachineCommandInput, UpdateStorageVirtualMachineCommandOutput, @@ -125,27 +153,37 @@ export type ServiceInputTypes = | CancelDataRepositoryTaskCommandInput | CopyBackupCommandInput | CreateBackupCommandInput + | CreateDataRepositoryAssociationCommandInput | CreateDataRepositoryTaskCommandInput | CreateFileSystemCommandInput | CreateFileSystemFromBackupCommandInput + | CreateSnapshotCommandInput | CreateStorageVirtualMachineCommandInput | CreateVolumeCommandInput | CreateVolumeFromBackupCommandInput | DeleteBackupCommandInput + | DeleteDataRepositoryAssociationCommandInput | DeleteFileSystemCommandInput + | DeleteSnapshotCommandInput | DeleteStorageVirtualMachineCommandInput | DeleteVolumeCommandInput | DescribeBackupsCommandInput + | DescribeDataRepositoryAssociationsCommandInput | DescribeDataRepositoryTasksCommandInput | DescribeFileSystemAliasesCommandInput | DescribeFileSystemsCommandInput + | DescribeSnapshotsCommandInput | DescribeStorageVirtualMachinesCommandInput | DescribeVolumesCommandInput | DisassociateFileSystemAliasesCommandInput | ListTagsForResourceCommandInput + | ReleaseFileSystemNfsV3LocksCommandInput + | RestoreVolumeFromSnapshotCommandInput | TagResourceCommandInput | UntagResourceCommandInput + | UpdateDataRepositoryAssociationCommandInput | UpdateFileSystemCommandInput + | UpdateSnapshotCommandInput | UpdateStorageVirtualMachineCommandInput | UpdateVolumeCommandInput; @@ -154,27 +192,37 @@ export type ServiceOutputTypes = | CancelDataRepositoryTaskCommandOutput | CopyBackupCommandOutput | CreateBackupCommandOutput + | CreateDataRepositoryAssociationCommandOutput | CreateDataRepositoryTaskCommandOutput | CreateFileSystemCommandOutput | CreateFileSystemFromBackupCommandOutput + | CreateSnapshotCommandOutput | CreateStorageVirtualMachineCommandOutput | CreateVolumeCommandOutput | CreateVolumeFromBackupCommandOutput | DeleteBackupCommandOutput + | DeleteDataRepositoryAssociationCommandOutput | DeleteFileSystemCommandOutput + | DeleteSnapshotCommandOutput | DeleteStorageVirtualMachineCommandOutput | DeleteVolumeCommandOutput | DescribeBackupsCommandOutput + | DescribeDataRepositoryAssociationsCommandOutput | DescribeDataRepositoryTasksCommandOutput | DescribeFileSystemAliasesCommandOutput | DescribeFileSystemsCommandOutput + | DescribeSnapshotsCommandOutput | DescribeStorageVirtualMachinesCommandOutput | DescribeVolumesCommandOutput | DisassociateFileSystemAliasesCommandOutput | ListTagsForResourceCommandOutput + | ReleaseFileSystemNfsV3LocksCommandOutput + | RestoreVolumeFromSnapshotCommandOutput | TagResourceCommandOutput | UntagResourceCommandOutput + | UpdateDataRepositoryAssociationCommandOutput | UpdateFileSystemCommandOutput + | UpdateSnapshotCommandOutput | UpdateStorageVirtualMachineCommandOutput | UpdateVolumeCommandOutput; diff --git a/clients/client-fsx/src/commands/CopyBackupCommand.ts b/clients/client-fsx/src/commands/CopyBackupCommand.ts index 27cbc021e4e6..37a401ef8bf1 100644 --- a/clients/client-fsx/src/commands/CopyBackupCommand.ts +++ b/clients/client-fsx/src/commands/CopyBackupCommand.ts @@ -25,24 +25,24 @@ export interface CopyBackupCommandOutput extends CopyBackupResponse, __MetadataB *

                                      Copies an existing backup within the same Amazon Web Services account to another Amazon Web Services Region * (cross-Region copy) or within the same Amazon Web Services Region (in-Region copy). You can have up to five * backup copy requests in progress to a single destination Region per account.

                                      - *

                                      You can use cross-Region backup copies for cross-region disaster recovery. - * You periodically take backups and copy them to another Region so that in the - * event of a disaster in the primary Region, you can restore from backup and recover - * availability quickly in the other Region. You can make cross-Region copies - * only within your Amazon Web Services partition.

                                      - *

                                      You can also use backup copies to clone your file data set to another Region - * or within the same Region.

                                      + *

                                      You can use cross-Region backup copies for cross-Region disaster recovery. You can + * periodically take backups and copy them to another Region so that in the event of a + * disaster in the primary Region, you can restore from backup and recover availability + * quickly in the other Region. You can make cross-Region copies only within your Amazon Web Services partition. A partition is a grouping of Regions. Amazon Web Services currently + * has three partitions: aws (Standard Regions), aws-cn (China + * Regions), and aws-us-gov (Amazon Web Services GovCloud [US] Regions).

                                      + *

                                      You can also use backup copies to clone your file dataset to another Region or within + * the same Region.

                                      *

                                      You can use the SourceRegion parameter to specify the Amazon Web Services Region * from which the backup will be copied. For example, if you make the call from the * us-west-1 Region and want to copy a backup from the us-east-2 * Region, you specify us-east-2 in the SourceRegion parameter * to make a cross-Region copy. If you don't specify a Region, the backup copy is * created in the same Region where the request is sent from (in-Region copy).
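A short sketch of the cross-Region copy described above, sent from us-west-1 and pulling a backup from us-east-2 (the backup ID and Regions are placeholders):

```ts
import { FSxClient, CopyBackupCommand } from "@aws-sdk/client-fsx";

// The client targets the destination Region; SourceRegion names where the
// source backup lives. Omitting SourceRegion makes this an in-Region copy.
async function copyBackupCrossRegion(): Promise<void> {
  const client = new FSxClient({ region: "us-west-1" });
  const { Backup } = await client.send(
    new CopyBackupCommand({
      SourceBackupId: "backup-0123456789abcdef0", // placeholder
      SourceRegion: "us-east-2",
    })
  );
  console.log("copy created in us-west-1:", Backup?.BackupId);
}
```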

                                      - *

                                      For more information on creating backup copies, see - * - * Copying backups in the Amazon FSx for Windows User Guide and - * Copying backups - * in the Amazon FSx for Lustre User Guide.

                                      + *

                                      For more information about creating backup copies, see Copying backups + * in the Amazon FSx for Windows User Guide, Copying backups in the Amazon FSx for Lustre User + * Guide, and Copying backups in the Amazon FSx for OpenZFS User + * Guide.

                                      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/CreateBackupCommand.ts b/clients/client-fsx/src/commands/CreateBackupCommand.ts index b0bbd49c105f..ce3c08dee926 100644 --- a/clients/client-fsx/src/commands/CreateBackupCommand.ts +++ b/clients/client-fsx/src/commands/CreateBackupCommand.ts @@ -22,42 +22,44 @@ export interface CreateBackupCommandInput extends CreateBackupRequest {} export interface CreateBackupCommandOutput extends CreateBackupResponse, __MetadataBearer {} /** - *

                                      Creates a backup of an existing Amazon FSx for Windows File Server - * or Amazon FSx for Lustre file system, or of an Amazon FSx for NetApp ONTAP - * volume. Creating regular backups is a best practice, enabling you to restore - * a file system or volume from a backup if an issue arises with the original - * file system or volume.

                                      - *

                                      For Amazon FSx for Lustre file systems, you can create a backup only - * for file systems with the following configuration:

                                      + *

                                      Creates a backup of an existing Amazon FSx for Windows File Server file + * system, Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP + * volume, or Amazon FSx for OpenZFS file system. We recommend creating regular + * backups so that you can restore a file system or volume from a backup if an issue arises + * with the original file system or volume.

                                      + *

                                      For Amazon FSx for Lustre file systems, you can create a backup only for file + * systems that have the following configuration:

                                      *
                                        *
                                      • - *

                                        a Persistent deployment type

                                        + *

                                        A Persistent deployment type

                                        *
                                      • *
                                      • - *

                                        is not linked to a data repository.

                                        + *

                                        Are not linked to a data repository

                                        *
                                      • *
                                      *

                                      For more information about backups, see the following:

                                      * - *

                                      If a backup with the specified client request token exists, and the parameters - * match, this operation returns the description of the existing backup. If a backup - * specified client request token exists, and the parameters don't match, this - * operation returns IncompatibleParameterError. If a backup with the - * specified client request token doesn't exist, CreateBackup does the - * following:

                                      + *

                                      If a backup with the specified client request token exists and the parameters match, + * this operation returns the description of the existing backup. If a backup with the + * specified client request token exists and the parameters don't match, this operation + * returns IncompatibleParameterError. If a backup with the specified client + * request token doesn't exist, CreateBackup does the following:

                                      *
                                        *
                                      • *

                                        Creates a new Amazon FSx backup with an assigned ID, and an initial @@ -74,10 +76,10 @@ export interface CreateBackupCommandOutput extends CreateBackupResponse, __Metad * you use the same client request token and the initial call created a backup, the * operation returns a successful result because all the parameters are the same.

                                        * - *

                                        The CreateBackup operation returns while the backup's - * lifecycle state is still CREATING. You can check the backup creation - * status by calling the DescribeBackups operation, which returns the - * backup state along with other information.

                                        + *

                                        The CreateBackup operation returns while the backup's lifecycle state is + * still CREATING. You can check the backup creation status by calling the + * DescribeBackups operation, which returns the backup state along with other + * information.
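A sketch of an idempotent CreateBackup call using a client request token, as described above. `randomUUID` assumes Node.js 14.17 or later; the file system ID is a placeholder.

```ts
import { FSxClient, CreateBackupCommand } from "@aws-sdk/client-fsx";
import { randomUUID } from "crypto";

// Reusing the same token on a retry returns the backup created by the first
// attempt instead of creating a second one.
async function createBackupIdempotently(client: FSxClient): Promise<string | undefined> {
  const token = randomUUID();
  const { Backup } = await client.send(
    new CreateBackupCommand({
      FileSystemId: "fs-0123456789abcdef0", // placeholder
      ClientRequestToken: token,
      Tags: [{ Key: "Name", Value: "nightly" }],
    })
  );
  // Lifecycle is CREATING at first; poll DescribeBackups to track progress.
  return Backup?.BackupId;
}
```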

                                        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/CreateDataRepositoryAssociationCommand.ts b/clients/client-fsx/src/commands/CreateDataRepositoryAssociationCommand.ts new file mode 100644 index 000000000000..ea480500a3f2 --- /dev/null +++ b/clients/client-fsx/src/commands/CreateDataRepositoryAssociationCommand.ts @@ -0,0 +1,113 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { CreateDataRepositoryAssociationRequest, CreateDataRepositoryAssociationResponse } from "../models/models_0"; +import { + deserializeAws_json1_1CreateDataRepositoryAssociationCommand, + serializeAws_json1_1CreateDataRepositoryAssociationCommand, +} from "../protocols/Aws_json1_1"; + +export interface CreateDataRepositoryAssociationCommandInput extends CreateDataRepositoryAssociationRequest {} +export interface CreateDataRepositoryAssociationCommandOutput + extends CreateDataRepositoryAssociationResponse, + __MetadataBearer {} + +/** + *

                                        Creates an Amazon FSx for Lustre data repository association (DRA). A data + * repository association is a link between a directory on the file system and + * an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository + * associations on a file system. Data repository associations are supported only + * for file systems with the Persistent_2 deployment type.

                                        + *

                                        Each data repository association must have a unique Amazon FSx file + * system directory and a unique S3 bucket or prefix associated with it. You + * can configure a data repository association for automatic import only, + * for automatic export only, or for both. To learn more about linking a + * data repository to your file system, see + * Linking your file system to an S3 bucket.
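A hedged sketch of creating one association that both imports and exports automatically. The `S3`, `AutoImportPolicy`, and `AutoExportPolicy` member names follow the generated request shape at this version; the IDs, path, and bucket are placeholders.

```ts
import { FSxClient, CreateDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx";

// Link /ns1/exported on the file system to an S3 prefix and propagate
// new, changed, and deleted objects in both directions.
async function linkToS3(client: FSxClient): Promise<void> {
  const { Association } = await client.send(
    new CreateDataRepositoryAssociationCommand({
      FileSystemId: "fs-0123456789abcdef0", // placeholder
      FileSystemPath: "/ns1/exported",
      DataRepositoryPath: "s3://example-bucket/exported/", // placeholder bucket
      S3: {
        AutoImportPolicy: { Events: ["NEW", "CHANGED", "DELETED"] },
        AutoExportPolicy: { Events: ["NEW", "CHANGED", "DELETED"] },
      },
    })
  );
  console.log(Association?.AssociationId, Association?.Lifecycle);
}
```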

                                        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, CreateDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, CreateDataRepositoryAssociationCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new CreateDataRepositoryAssociationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateDataRepositoryAssociationCommandInput} for command's `input` shape. + * @see {@link CreateDataRepositoryAssociationCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class CreateDataRepositoryAssociationCommand extends $Command< + CreateDataRepositoryAssociationCommandInput, + CreateDataRepositoryAssociationCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateDataRepositoryAssociationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "CreateDataRepositoryAssociationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateDataRepositoryAssociationRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateDataRepositoryAssociationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: CreateDataRepositoryAssociationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1CreateDataRepositoryAssociationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1CreateDataRepositoryAssociationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/CreateDataRepositoryTaskCommand.ts b/clients/client-fsx/src/commands/CreateDataRepositoryTaskCommand.ts index cd4f0dec0be7..e89cbd0c7752 100644 --- a/clients/client-fsx/src/commands/CreateDataRepositoryTaskCommand.ts +++ b/clients/client-fsx/src/commands/CreateDataRepositoryTaskCommand.ts @@ -24,13 +24,14 @@ export interface CreateDataRepositoryTaskCommandOutput extends CreateDataReposit /** *

                                        Creates an Amazon FSx for Lustre data repository task. You use data repository tasks * to perform bulk operations between your Amazon FSx file system and its linked data - * repository. An example of a data repository task is - * exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A - * CreateDataRepositoryTask operation will fail if a data repository is not - * linked to the FSx file system. To learn more about data repository tasks, see + * repositories. An example of a data repository task is exporting any data and metadata + * changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) + * from your FSx file system to a linked data repository. A CreateDataRepositoryTask + * operation will fail if a data repository is not linked to the FSx file system. To learn + * more about data repository tasks, see * Data Repository Tasks. * To learn more about linking a data repository to your file system, see - * Linking your file system to an S3 bucket.

                                        + * Linking your file system to an S3 bucket.

                                        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/CreateFileSystemCommand.ts b/clients/client-fsx/src/commands/CreateFileSystemCommand.ts index e1912d31ee3a..39dd74ca9c7b 100644 --- a/clients/client-fsx/src/commands/CreateFileSystemCommand.ts +++ b/clients/client-fsx/src/commands/CreateFileSystemCommand.ts @@ -22,24 +22,19 @@ export interface CreateFileSystemCommandInput extends CreateFileSystemRequest {} export interface CreateFileSystemCommandOutput extends CreateFileSystemResponse, __MetadataBearer {} /** - *

                                        Creates a new, empty Amazon FSx file system.

                                        - * - *

                                        If a file system with the specified client request token exists and the parameters - * match, CreateFileSystem returns the description of the existing file - * system. If a file system specified client request token exists and the parameters - * don't match, this call returns IncompatibleParameterError. If a file - * system with the specified client request token doesn't exist, - * CreateFileSystem does the following:

                                        + *

                                        Creates a new, empty Amazon FSx file system. You can create the following supported + * Amazon FSx file systems using the CreateFileSystem API operation:

                                        *
                                          *
                                        • - *

                                          Creates a new, empty Amazon FSx file system with an assigned ID, and an - * initial lifecycle state of CREATING.

                                          + *

                                          Amazon FSx for Lustre

                                          *
                                        • *
                                        • - *

                                          Returns the description of the file system.

                                          + *

                                          Amazon FSx for NetApp ONTAP

                                          + *
                                        • + *
                                        • + *

                                          Amazon FSx for Windows File Server

                                          *
                                        • *
                                        - * *

                                        This operation requires a client request token in the request that Amazon FSx uses * to ensure idempotent creation. This means that calling the operation multiple times with * the same client request token has no effect. By using the idempotent operation, you can @@ -49,11 +44,36 @@ export interface CreateFileSystemCommandOutput extends CreateFileSystemResponse, * occurred, or your connection was reset. If you use the same client request token and the * initial call created a file system, the client receives success as long as the * parameters are the same.

                                        + *

                                        If a file system with the specified client request token exists and the parameters + * match, CreateFileSystem returns the description of the existing file + * system. If a file system with the specified client request token exists and the + * parameters don't match, this call returns IncompatibleParameterError. If a + * file system with the specified client request token doesn't exist, + * CreateFileSystem does the following:

                                        + *
                                          + *
                                        • + *

                                          Creates a new, empty Amazon FSx file system with an assigned ID, and + * an initial lifecycle state of CREATING.

                                          + *
                                        • + *
                                        • + *

                                          Returns the description of the file system.

                                          + *
                                        • + *
                                        + * + *

                                        This operation requires a client request token in the request that Amazon FSx + * uses to ensure idempotent creation. This means that calling the operation multiple times + * with the same client request token has no effect. By using the idempotent operation, you + * can retry a CreateFileSystem operation without the risk of creating an + * extra file system. This approach can be useful when an initial call fails in a way that + * makes it unclear whether a file system was created. Examples are if a transport-level + * timeout occurred, or your connection was reset. If you use the same client request token + * and the initial call created a file system, the client receives a success message as + * long as the parameters are the same.

                                        * - *

                                        The CreateFileSystem call returns while the file system's - * lifecycle state is still CREATING. You can check the file-system - * creation status by calling the DescribeFileSystems operation, - * which returns the file system state along with other information.

                                        + *

                                        The CreateFileSystem call returns while the file system's lifecycle + * state is still CREATING. You can check the file-system creation status + * by calling the DescribeFileSystems operation, which returns the file system state + * along with other information.

                                        *
                                        * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts b/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts index 92f3aba1d9ad..cfee43a97b5c 100644 --- a/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts +++ b/clients/client-fsx/src/commands/CreateFileSystemFromBackupCommand.ts @@ -22,26 +22,26 @@ export interface CreateFileSystemFromBackupCommandInput extends CreateFileSystem export interface CreateFileSystemFromBackupCommandOutput extends CreateFileSystemFromBackupResponse, __MetadataBearer {} /** - *

                                        Creates a new Amazon FSx for Lustre or Amazon FSx for Windows File Server file system - * from an existing Amazon FSx backup.

                                        + *

                                        Creates a new Amazon FSx for Lustre, Amazon FSx for Windows File + * Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.

                                        * *

   * If a file system with the specified client request token exists and the parameters
-  * match, this operation returns the description of the file system. If a client
-  * request token specified by the file system exists and the parameters don't match,
-  * this call returns IncompatibleParameterError. If a file system with the
+  * match, this operation returns the description of the file system. If a client request
+  * token specified by the file system exists and the parameters don't match, this
+  * call returns IncompatibleParameterError. If a file system with the
   * specified client request token doesn't exist, this operation does the following:

                                        * *
                                          *
                                        • - *

                                          Creates a new Amazon FSx file system from backup with an assigned ID, and - * an initial lifecycle state of CREATING.

                                          + *

                                          Creates a new Amazon FSx file system from backup with an assigned ID, + * and an initial lifecycle state of CREATING.

                                          *
                                        • *
                                        • *

                                          Returns the description of the file system.

                                          *
                                        • *
                                        * - *

                                        Parameters like Active Directory, default share name, automatic backup, and backup + *

                                        Parameters like the Active Directory, default share name, automatic backup, and backup * settings default to the parameters of the file system that was backed up, unless * overridden. You can explicitly supply other settings.

                                        * @@ -50,14 +50,14 @@ export interface CreateFileSystemFromBackupCommandOutput extends CreateFileSyste * file system. This approach can be useful when an initial call fails in a way that makes * it unclear whether a file system was created. Examples are if a transport level timeout * occurred, or your connection was reset. If you use the same client request token and the - * initial call created a file system, the client receives success as long as the + * initial call created a file system, the client receives a success message as long as the * parameters are the same.

                                        * - *

                                        The CreateFileSystemFromBackup call returns while the file - * system's lifecycle state is still CREATING. You can check the - * file-system creation status by calling the DescribeFileSystems - * operation, which returns the file system state along with other - * information.

                                        + *

                                        The CreateFileSystemFromBackup call returns while the file system's + * lifecycle state is still CREATING. You can check the file-system + * creation status by calling the + * DescribeFileSystems operation, which returns the file system state along + * with other information.

                                        *
                                        * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-fsx/src/commands/CreateSnapshotCommand.ts b/clients/client-fsx/src/commands/CreateSnapshotCommand.ts new file mode 100644 index 000000000000..05c963ff826f --- /dev/null +++ b/clients/client-fsx/src/commands/CreateSnapshotCommand.ts @@ -0,0 +1,122 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { CreateSnapshotRequest, CreateSnapshotResponse } from "../models/models_0"; +import { + deserializeAws_json1_1CreateSnapshotCommand, + serializeAws_json1_1CreateSnapshotCommand, +} from "../protocols/Aws_json1_1"; + +export interface CreateSnapshotCommandInput extends CreateSnapshotRequest {} +export interface CreateSnapshotCommandOutput extends CreateSnapshotResponse, __MetadataBearer {} + +/** + *

                                        Creates a snapshot of an existing Amazon FSx for OpenZFS file system. With + * snapshots, you can easily undo file changes and compare file versions by restoring the + * volume to a previous version.

                                        + *

                                        If a snapshot with the specified client request token exists, and the parameters + * match, this operation returns the description of the existing snapshot. If a snapshot + * with the specified client request token exists, and the parameters don't match, this + * operation returns IncompatibleParameterError. If a snapshot with the + * specified client request token doesn't exist, CreateSnapshot does the + * following:

                                        + *
                                          + *
                                        • + *

                                          Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle + * state of CREATING.

                                          + *
                                        • + *
                                        • + *

                                          Returns the description of the snapshot.

                                          + *
                                        • + *
                                        + *

                                        By using the idempotent operation, you can retry a CreateSnapshot + * operation without the risk of creating an extra snapshot. This approach can be useful + * when an initial call fails in a way that makes it unclear whether a snapshot was + * created. If you use the same client request token and the initial call created a + * snapshot, the operation returns a successful result because all the parameters are the + * same.

                                        + *

                                        The CreateSnapshot operation returns while the snapshot's lifecycle state + * is still CREATING. You can check the snapshot creation status by calling + * the DescribeSnapshots operation, which returns the snapshot state along with + * other information.
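A brief sketch of creating a snapshot of an OpenZFS volume; the volume ID and name are placeholders.

```ts
import { FSxClient, CreateSnapshotCommand } from "@aws-sdk/client-fsx";

async function snapshotVolume(client: FSxClient): Promise<void> {
  const { Snapshot } = await client.send(
    new CreateSnapshotCommand({
      Name: "before-upgrade", // placeholder name
      VolumeId: "fsvol-0123456789abcdef0", // placeholder volume
    })
  );
  // Lifecycle starts as CREATING; DescribeSnapshots reports when it is AVAILABLE.
  console.log(Snapshot?.SnapshotId, Snapshot?.Lifecycle);
}
```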

                                        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, CreateSnapshotCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, CreateSnapshotCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new CreateSnapshotCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateSnapshotCommandInput} for command's `input` shape. + * @see {@link CreateSnapshotCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class CreateSnapshotCommand extends $Command< + CreateSnapshotCommandInput, + CreateSnapshotCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateSnapshotCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "CreateSnapshotCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateSnapshotRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateSnapshotResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateSnapshotCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1CreateSnapshotCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1CreateSnapshotCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/CreateVolumeCommand.ts b/clients/client-fsx/src/commands/CreateVolumeCommand.ts index 242b1725dd26..724c5dc2356f 100644 --- a/clients/client-fsx/src/commands/CreateVolumeCommand.ts +++ b/clients/client-fsx/src/commands/CreateVolumeCommand.ts @@ -22,7 +22,8 @@ export interface CreateVolumeCommandInput extends CreateVolumeRequest {} export interface CreateVolumeCommandOutput extends CreateVolumeResponse, __MetadataBearer {} /** - *

                                        Creates an Amazon FSx for NetApp ONTAP storage volume.

                                        + *

                                        Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage + * volume.

                                        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/DeleteBackupCommand.ts b/clients/client-fsx/src/commands/DeleteBackupCommand.ts index becf472fade2..097014fb4f0d 100644 --- a/clients/client-fsx/src/commands/DeleteBackupCommand.ts +++ b/clients/client-fsx/src/commands/DeleteBackupCommand.ts @@ -22,10 +22,11 @@ export interface DeleteBackupCommandInput extends DeleteBackupRequest {} export interface DeleteBackupCommandOutput extends DeleteBackupResponse, __MetadataBearer {} /** - *

                                        Deletes an Amazon FSx backup, deleting its contents. After deletion, the backup no longer exists, and its data is gone.

                                        + *

                                        Deletes an Amazon FSx backup. After deletion, the backup no longer exists, and + * its data is gone.

                                        * - *

                                        The DeleteBackup call returns instantly. The backup will not show up - * in later DescribeBackups calls.

                                        + *

                                        The DeleteBackup call returns instantly. The backup won't show up in + * later DescribeBackups calls.

                                        * * *

                                        The data in a deleted backup is also deleted and can't be recovered by any diff --git a/clients/client-fsx/src/commands/DeleteDataRepositoryAssociationCommand.ts b/clients/client-fsx/src/commands/DeleteDataRepositoryAssociationCommand.ts new file mode 100644 index 000000000000..9577393f9da9 --- /dev/null +++ b/clients/client-fsx/src/commands/DeleteDataRepositoryAssociationCommand.ts @@ -0,0 +1,109 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { DeleteDataRepositoryAssociationRequest, DeleteDataRepositoryAssociationResponse } from "../models/models_0"; +import { + deserializeAws_json1_1DeleteDataRepositoryAssociationCommand, + serializeAws_json1_1DeleteDataRepositoryAssociationCommand, +} from "../protocols/Aws_json1_1"; + +export interface DeleteDataRepositoryAssociationCommandInput extends DeleteDataRepositoryAssociationRequest {} +export interface DeleteDataRepositoryAssociationCommandOutput + extends DeleteDataRepositoryAssociationResponse, + __MetadataBearer {} + +/** + *

                                        Deletes a data repository association on an Amazon FSx for Lustre + * file system. Deleting the data repository association unlinks the + * file system from the Amazon S3 bucket. When deleting a data repository + * association, you have the option of deleting the data in the file system + * that corresponds to the data repository association. Data repository + * associations are supported only for file systems with the + * Persistent_2 deployment type.
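A rough usage sketch of this command. The association ID is a placeholder, and the DeleteDataInFileSystem flag name, used here for the "delete the corresponding data" option mentioned above, is an assumption.

```ts
import { DeleteDataRepositoryAssociationCommand, FSxClient } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

// Unlink the Lustre file system from its S3 bucket, keeping the data that the
// association imported into the file system (flag name assumed).
const response = await client.send(
  new DeleteDataRepositoryAssociationCommand({
    AssociationId: "dra-0123456789abcdef0", // placeholder association ID
    DeleteDataInFileSystem: false,
  })
);
```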

                                        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, DeleteDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, DeleteDataRepositoryAssociationCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new DeleteDataRepositoryAssociationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteDataRepositoryAssociationCommandInput} for command's `input` shape. + * @see {@link DeleteDataRepositoryAssociationCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class DeleteDataRepositoryAssociationCommand extends $Command< + DeleteDataRepositoryAssociationCommandInput, + DeleteDataRepositoryAssociationCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteDataRepositoryAssociationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "DeleteDataRepositoryAssociationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteDataRepositoryAssociationRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteDataRepositoryAssociationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DeleteDataRepositoryAssociationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1DeleteDataRepositoryAssociationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DeleteDataRepositoryAssociationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/DeleteFileSystemCommand.ts b/clients/client-fsx/src/commands/DeleteFileSystemCommand.ts index 5ca4f13540b9..1b9d499781e2 100644 --- a/clients/client-fsx/src/commands/DeleteFileSystemCommand.ts +++ b/clients/client-fsx/src/commands/DeleteFileSystemCommand.ts @@ -22,27 +22,26 @@ export interface DeleteFileSystemCommandInput extends DeleteFileSystemRequest {} export interface DeleteFileSystemCommandOutput extends DeleteFileSystemResponse, __MetadataBearer {} /** - *

                                        Deletes a file system, deleting its contents. After deletion, the file system no - * longer exists, and its data is gone. Any existing automatic backups will also be - * deleted.

                                        - *

                                        To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes - * and SVMs on the file system. Then provide a FileSystemId value to the - * DeleFileSystem operation.

                                        + *

                                        Deletes a file system. After deletion, the file system no longer exists, and its data + * is gone. Any existing automatic backups and snapshots are also deleted.

                                        + *

To delete an Amazon FSx for NetApp ONTAP file system, first delete all the + * volumes and storage virtual machines (SVMs) on the file system. Then provide a + * FileSystemId value to the DeleteFileSystem operation.

                                        * - *

                                        By default, when you delete an Amazon FSx for Windows File Server file system, a - * final backup is created upon deletion. This final backup is not subject to the file + *

                                        By default, when you delete an Amazon FSx for Windows File Server file system, + * a final backup is created upon deletion. This final backup isn't subject to the file * system's retention policy, and must be manually deleted.

                                        * - *

                                        The DeleteFileSystem action returns while the file system has the + *

                                        The DeleteFileSystem operation returns while the file system has the * DELETING status. You can check the file system deletion status by - * calling the DescribeFileSystems action, which returns a list of file - * systems in your account. If you pass the file system ID for a deleted file system, the - * DescribeFileSystems returns a FileSystemNotFound + * calling the DescribeFileSystems operation, which returns a list of file systems in your + * account. If you pass the file system ID for a deleted file system, the + * DescribeFileSystems operation returns a FileSystemNotFound * error.
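A sketch of the delete-then-check flow described above. The Lifecycle response field and the FileSystemNotFound error name used for the check are assumptions based on this description, and the file system ID is a placeholder.

```ts
import { DeleteFileSystemCommand, DescribeFileSystemsCommand, FSxClient } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

async function deleteAndCheck(fileSystemId: string): Promise<string> {
  await client.send(new DeleteFileSystemCommand({ FileSystemId: fileSystemId }));

  try {
    // While deletion is in progress, the file system is still listed with a
    // DELETING status; once it is gone, the describe call fails.
    const { FileSystems } = await client.send(
      new DescribeFileSystemsCommand({ FileSystemIds: [fileSystemId] })
    );
    return FileSystems?.[0]?.Lifecycle ?? "UNKNOWN"; // field name assumed
  } catch (err) {
    if ((err as Error).name === "FileSystemNotFound") return "DELETED";
    throw err;
  }
}
```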

                                        * - *

                                        Deleting an Amazon FSx for Lustre file system will fail with a 400 BadRequest if - * a data repository task is in a PENDING or EXECUTING - * state.

                                        + *

                                        If a data repository task is in a PENDING or EXECUTING state, + * deleting an Amazon FSx for Lustre file system will fail with an HTTP status + * code 400 (Bad Request).

                                        *
                                        * *

                                        The data in a deleted file system is also deleted and can't be recovered by diff --git a/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts b/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts new file mode 100644 index 000000000000..6622571da7c4 --- /dev/null +++ b/clients/client-fsx/src/commands/DeleteSnapshotCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { DeleteSnapshotRequest, DeleteSnapshotResponse } from "../models/models_0"; +import { + deserializeAws_json1_1DeleteSnapshotCommand, + serializeAws_json1_1DeleteSnapshotCommand, +} from "../protocols/Aws_json1_1"; + +export interface DeleteSnapshotCommandInput extends DeleteSnapshotRequest {} +export interface DeleteSnapshotCommandOutput extends DeleteSnapshotResponse, __MetadataBearer {} + +/** + *

                                        Deletes the Amazon FSx snapshot. After deletion, the snapshot no longer + * exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a + * file system backup.

                                        + *

                                        The DeleteSnapshot operation returns instantly. The snapshot appears with + * the lifecycle status of DELETING until the deletion is complete.
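For completeness, a one-call sketch; the snapshot ID is a placeholder.

```ts
import { DeleteSnapshotCommand, FSxClient } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

// The call returns as soon as the snapshot enters the DELETING lifecycle.
await client.send(
  new DeleteSnapshotCommand({ SnapshotId: "fsvolsnap-0123456789abcdef0" }) // placeholder ID
);
```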

                                        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, DeleteSnapshotCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, DeleteSnapshotCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new DeleteSnapshotCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteSnapshotCommandInput} for command's `input` shape. + * @see {@link DeleteSnapshotCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class DeleteSnapshotCommand extends $Command< + DeleteSnapshotCommandInput, + DeleteSnapshotCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteSnapshotCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "DeleteSnapshotCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteSnapshotRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteSnapshotResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteSnapshotCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DeleteSnapshotCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1DeleteSnapshotCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/DeleteVolumeCommand.ts b/clients/client-fsx/src/commands/DeleteVolumeCommand.ts index 4bc22683728c..7c252d33632e 100644 --- a/clients/client-fsx/src/commands/DeleteVolumeCommand.ts +++ b/clients/client-fsx/src/commands/DeleteVolumeCommand.ts @@ -22,10 +22,8 @@ export interface DeleteVolumeCommandInput extends DeleteVolumeRequest {} export interface DeleteVolumeCommandOutput extends DeleteVolumeResponse, __MetadataBearer {} /** - *

                                        Deletes an Amazon FSx for NetApp ONTAP volume. When deleting a volume, - * you have the option of creating a final backup. If you create a final backup, you have the option to - * apply Tags to the backup. You need to have fsx:TagResource - * permission in order to apply tags to the backup.

                                        + *

                                        Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS + * volume.

                                        * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/DescribeBackupsCommand.ts b/clients/client-fsx/src/commands/DescribeBackupsCommand.ts index ab54045d8ab6..ef4c651f31e7 100644 --- a/clients/client-fsx/src/commands/DescribeBackupsCommand.ts +++ b/clients/client-fsx/src/commands/DescribeBackupsCommand.ts @@ -22,34 +22,33 @@ export interface DescribeBackupsCommandInput extends DescribeBackupsRequest {} export interface DescribeBackupsCommandOutput extends DescribeBackupsResponse, __MetadataBearer {} /** - *

                                        Returns the description of specific Amazon FSx backups, if - * a BackupIds value is provided for that backup. Otherwise, it returns all - * backups owned by your Amazon Web Services account in the Amazon Web Services Region - * of the endpoint that you're calling.

                                        + *

                                        Returns the description of a specific Amazon FSx backup, if a + * BackupIds value is provided for that backup. Otherwise, it returns all + * backups owned by your Amazon Web Services account in the Amazon Web Services Region of the + * endpoint that you're calling.

                                        * *

                                        When retrieving all backups, you can optionally specify the MaxResults - * parameter to limit the number of backups in a response. If more backups remain, Amazon - * FSx returns a NextToken value in the response. In this case, send a later - * request with the NextToken request parameter set to the value of - * NextToken from the last response.

                                        + * parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case, + * send a later request with the NextToken request parameter set to the value + * of the NextToken value from the last response.

                                        * - *

                                        This action is used in an iterative process to retrieve a list of your backups. - * DescribeBackups is called first without a NextTokenvalue. - * Then the action continues to be called with the NextToken parameter set to - * the value of the last NextToken value until a response has no - * NextToken.

                                        + *

                                        This operation is used in an iterative process to retrieve a list of your backups. + * DescribeBackups is called first without a NextToken value. + * Then the operation continues to be called with the NextToken parameter set + * to the value of the last NextToken value until a response has no + * NextToken value.
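The NextToken loop that the paragraph above describes, sketched against DescribeBackupsCommand; the MaxResults value is arbitrary.

```ts
import { DescribeBackupsCommand, DescribeBackupsCommandOutput, FSxClient } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

const backups: NonNullable<DescribeBackupsCommandOutput["Backups"]> = [];
let nextToken: string | undefined;

do {
  // Fetch one page; FSx returns NextToken while more backups remain.
  const page = await client.send(
    new DescribeBackupsCommand({ MaxResults: 25, NextToken: nextToken })
  );
  backups.push(...(page.Backups ?? []));
  nextToken = page.NextToken;
} while (nextToken);
```

The same loop applies unchanged to the other Describe operations in this patch that take MaxResults and NextToken.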

                                        * - *

                                        When using this action, keep the following in mind:

                                        + *

                                        When using this operation, keep the following in mind:

                                        *
- *   - The implementation might return fewer than MaxResults
+ *   - The operation might return fewer than the MaxResults value of
 *     backup descriptions while still including a NextToken
 *     value.
- *   - The order of backups returned in the response of one
- *     DescribeBackups call and the order of backups returned across
- *     the responses of a multi-call iteration is unspecified.
+ *   - The order of the backups returned in the response of one
+ *     DescribeBackups call and the order of the backups returned
+ *     across the responses of a multi-call iteration is unspecified.
                                        * @example diff --git a/clients/client-fsx/src/commands/DescribeDataRepositoryAssociationsCommand.ts b/clients/client-fsx/src/commands/DescribeDataRepositoryAssociationsCommand.ts new file mode 100644 index 000000000000..29c989f31f82 --- /dev/null +++ b/clients/client-fsx/src/commands/DescribeDataRepositoryAssociationsCommand.ts @@ -0,0 +1,124 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { + DescribeDataRepositoryAssociationsRequest, + DescribeDataRepositoryAssociationsResponse, +} from "../models/models_0"; +import { + deserializeAws_json1_1DescribeDataRepositoryAssociationsCommand, + serializeAws_json1_1DescribeDataRepositoryAssociationsCommand, +} from "../protocols/Aws_json1_1"; + +export interface DescribeDataRepositoryAssociationsCommandInput extends DescribeDataRepositoryAssociationsRequest {} +export interface DescribeDataRepositoryAssociationsCommandOutput + extends DescribeDataRepositoryAssociationsResponse, + __MetadataBearer {} + +/** + *

                                        Returns the description of specific Amazon FSx for Lustre data repository associations, if + * one or more AssociationIds values are provided in the request, or if filters are + * used in the request. Data repository associations are supported only + * for file systems with the Persistent_2 deployment type.

                                        + * + *

                                        You can use filters to narrow the response to include just data repository + * associations for specific file systems (use the file-system-id filter with + * the ID of the file system) or data repository associations for a specific repository type + * (use the data-repository-type filter with a value of S3). + * If you don't use filters, the response returns all data repository associations + * owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint + * that you're calling.

                                        + * + *

                                        When retrieving all data repository associations, you can paginate the response by using + * the optional MaxResults parameter to limit the number of data repository associations + * returned in a response. If more data repository associations remain, Amazon FSx returns a + * NextToken value in the response. In this case, send a later + * request with the NextToken request parameter set to the value of + * NextToken from the last response.
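A sketch combining the file-system-id filter named above with the MaxResults pagination input. The Filters field shape (Name/Values), the Associations response field, and the file system ID are assumptions for illustration.

```ts
import { DescribeDataRepositoryAssociationsCommand, FSxClient } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

// First page of associations for one Lustre file system; pass NextToken from
// the response into a follow-up request to page through the rest.
const { Associations, NextToken } = await client.send(
  new DescribeDataRepositoryAssociationsCommand({
    Filters: [{ Name: "file-system-id", Values: ["fs-0123456789abcdef0"] }], // placeholder ID
    MaxResults: 10,
  })
);
```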

                                        + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, DescribeDataRepositoryAssociationsCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, DescribeDataRepositoryAssociationsCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new DescribeDataRepositoryAssociationsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeDataRepositoryAssociationsCommandInput} for command's `input` shape. + * @see {@link DescribeDataRepositoryAssociationsCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class DescribeDataRepositoryAssociationsCommand extends $Command< + DescribeDataRepositoryAssociationsCommandInput, + DescribeDataRepositoryAssociationsCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeDataRepositoryAssociationsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "DescribeDataRepositoryAssociationsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeDataRepositoryAssociationsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeDataRepositoryAssociationsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DescribeDataRepositoryAssociationsCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1DescribeDataRepositoryAssociationsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1DescribeDataRepositoryAssociationsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/DescribeFileSystemsCommand.ts b/clients/client-fsx/src/commands/DescribeFileSystemsCommand.ts index 75ea11c2d86b..eea39e010802 100644 --- a/clients/client-fsx/src/commands/DescribeFileSystemsCommand.ts +++ b/clients/client-fsx/src/commands/DescribeFileSystemsCommand.ts @@ -24,23 +24,23 @@ export interface DescribeFileSystemsCommandOutput extends DescribeFileSystemsRes /** *

                                        Returns the description of specific Amazon FSx file systems, if a * FileSystemIds value is provided for that file system. Otherwise, it - * returns descriptions of all file systems owned by your Amazon Web Services account in - * the Amazon Web Services Region of the endpoint that you're calling.

                                        + * returns descriptions of all file systems owned by your Amazon Web Services account in the + * Amazon Web Services Region of the endpoint that you're calling.

                                        * *

                                        When retrieving all file system descriptions, you can optionally specify the * MaxResults parameter to limit the number of descriptions in a response. - * If more file system descriptions remain, Amazon FSx returns a NextToken - * value in the response. In this case, send a later request with the - * NextToken request parameter set to the value of NextToken - * from the last response.

                                        + * If more file system descriptions remain, Amazon FSx returns a + * NextToken value in the response. In this case, send a later request + * with the NextToken request parameter set to the value of + * NextToken from the last response.

                                        * - *

                                        This action is used in an iterative process to retrieve a list of your file system + *

This operation is used in an iterative process to retrieve a list of your file system + * descriptions. DescribeFileSystems is called first without a - * NextTokenvalue. Then the action continues to be called with the + * NextToken value. Then the operation continues to be called with the * NextToken parameter set to the value of the last NextToken * value until a response has no NextToken.
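The same iteration, sketched for DescribeFileSystemsCommand to collect every file system ID in the Region; the MaxResults value is arbitrary.

```ts
import { DescribeFileSystemsCommand, FSxClient } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

const fileSystemIds: string[] = [];
let token: string | undefined;

do {
  const page = await client.send(
    new DescribeFileSystemsCommand({ MaxResults: 50, NextToken: token })
  );
  for (const fs of page.FileSystems ?? []) {
    if (fs.FileSystemId) fileSystemIds.push(fs.FileSystemId);
  }
  token = page.NextToken;
} while (token);
```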

                                        * - *

                                        When using this action, keep the following in mind:

                                        + *

                                        When using this operation, keep the following in mind:

                                        *
                                          *
                                        • *

                                          The implementation might return fewer than MaxResults file diff --git a/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts b/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts new file mode 100644 index 000000000000..56aa6a9584ce --- /dev/null +++ b/clients/client-fsx/src/commands/DescribeSnapshotsCommand.ts @@ -0,0 +1,121 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { DescribeSnapshotsRequest, DescribeSnapshotsResponse } from "../models/models_0"; +import { + deserializeAws_json1_1DescribeSnapshotsCommand, + serializeAws_json1_1DescribeSnapshotsCommand, +} from "../protocols/Aws_json1_1"; + +export interface DescribeSnapshotsCommandInput extends DescribeSnapshotsRequest {} +export interface DescribeSnapshotsCommandOutput extends DescribeSnapshotsResponse, __MetadataBearer {} + +/** + *

                                          Returns the description of specific Amazon FSx snapshots, if a + * SnapshotIds value is provided. Otherwise, this operation returns all + * snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of + * the endpoint that you're calling.

                                          + *

When retrieving all snapshots, you can optionally specify the MaxResults + * parameter to limit the number of snapshots in a response. If more snapshots remain, + * Amazon FSx returns a NextToken value in the response. In this + * case, send a later request with the NextToken request parameter set to the + * value of NextToken from the last response.

                                          + *

                                          Use this operation in an iterative process to retrieve a list of your snapshots. + * DescribeSnapshots is called first without a NextToken + * value. Then the operation continues to be called with the NextToken + * parameter set to the value of the last NextToken value until a response has + * no NextToken value.
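A sketch that narrows the listing to one volume rather than walking every page; the volume-id filter name, the Filters field shape, and the volume ID are assumptions.

```ts
import { DescribeSnapshotsCommand, FSxClient } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

// Snapshots of a single OpenZFS volume; a NextToken in the response means
// more snapshots remain and the same loop shown earlier applies.
const { Snapshots, NextToken } = await client.send(
  new DescribeSnapshotsCommand({
    Filters: [{ Name: "volume-id", Values: ["fsvol-0123456789abcdef0"] }], // placeholder ID
    MaxResults: 20,
  })
);
```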

                                          + *

                                          When using this operation, keep the following in mind:

                                          + *
+ *   - The operation might return fewer than the MaxResults value of
+ *     snapshot descriptions while still including a NextToken
+ *     value.
+ *   - The order of snapshots returned in the response of one
+ *     DescribeSnapshots call and the order of snapshots returned across
+ *     the responses of a multi-call iteration is unspecified.
                                          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, DescribeSnapshotsCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, DescribeSnapshotsCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new DescribeSnapshotsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeSnapshotsCommandInput} for command's `input` shape. + * @see {@link DescribeSnapshotsCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class DescribeSnapshotsCommand extends $Command< + DescribeSnapshotsCommandInput, + DescribeSnapshotsCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeSnapshotsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "DescribeSnapshotsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeSnapshotsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeSnapshotsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeSnapshotsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1DescribeSnapshotsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1DescribeSnapshotsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/DescribeVolumesCommand.ts b/clients/client-fsx/src/commands/DescribeVolumesCommand.ts index b6aa29cc1c38..70d61c78f84f 100644 --- a/clients/client-fsx/src/commands/DescribeVolumesCommand.ts +++ b/clients/client-fsx/src/commands/DescribeVolumesCommand.ts @@ -22,7 +22,8 @@ export interface DescribeVolumesCommandInput extends DescribeVolumesRequest {} export interface DescribeVolumesCommandOutput extends DescribeVolumesResponse, __MetadataBearer {} /** - *

                                          Describes one or more Amazon FSx for NetApp ONTAP volumes.

                                          + *

                                          Describes one or more Amazon FSx for NetApp ONTAP or Amazon FSx for + * OpenZFS volumes.

                                          * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/ReleaseFileSystemNfsV3LocksCommand.ts b/clients/client-fsx/src/commands/ReleaseFileSystemNfsV3LocksCommand.ts new file mode 100644 index 000000000000..3123aedcc91b --- /dev/null +++ b/clients/client-fsx/src/commands/ReleaseFileSystemNfsV3LocksCommand.ts @@ -0,0 +1,101 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { ReleaseFileSystemNfsV3LocksRequest, ReleaseFileSystemNfsV3LocksResponse } from "../models/models_0"; +import { + deserializeAws_json1_1ReleaseFileSystemNfsV3LocksCommand, + serializeAws_json1_1ReleaseFileSystemNfsV3LocksCommand, +} from "../protocols/Aws_json1_1"; + +export interface ReleaseFileSystemNfsV3LocksCommandInput extends ReleaseFileSystemNfsV3LocksRequest {} +export interface ReleaseFileSystemNfsV3LocksCommandOutput + extends ReleaseFileSystemNfsV3LocksResponse, + __MetadataBearer {} + +/** + *

                                          Releases the file system lock from an Amazon FSx for OpenZFS file + * system.

                                          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, ReleaseFileSystemNfsV3LocksCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, ReleaseFileSystemNfsV3LocksCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new ReleaseFileSystemNfsV3LocksCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ReleaseFileSystemNfsV3LocksCommandInput} for command's `input` shape. + * @see {@link ReleaseFileSystemNfsV3LocksCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class ReleaseFileSystemNfsV3LocksCommand extends $Command< + ReleaseFileSystemNfsV3LocksCommandInput, + ReleaseFileSystemNfsV3LocksCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ReleaseFileSystemNfsV3LocksCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "ReleaseFileSystemNfsV3LocksCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ReleaseFileSystemNfsV3LocksRequest.filterSensitiveLog, + outputFilterSensitiveLog: ReleaseFileSystemNfsV3LocksResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ReleaseFileSystemNfsV3LocksCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1ReleaseFileSystemNfsV3LocksCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1ReleaseFileSystemNfsV3LocksCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/RestoreVolumeFromSnapshotCommand.ts b/clients/client-fsx/src/commands/RestoreVolumeFromSnapshotCommand.ts new file mode 100644 index 000000000000..1c37a0a2a53f --- /dev/null +++ b/clients/client-fsx/src/commands/RestoreVolumeFromSnapshotCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { 
RestoreVolumeFromSnapshotRequest, RestoreVolumeFromSnapshotResponse } from "../models/models_0"; +import { + deserializeAws_json1_1RestoreVolumeFromSnapshotCommand, + serializeAws_json1_1RestoreVolumeFromSnapshotCommand, +} from "../protocols/Aws_json1_1"; + +export interface RestoreVolumeFromSnapshotCommandInput extends RestoreVolumeFromSnapshotRequest {} +export interface RestoreVolumeFromSnapshotCommandOutput extends RestoreVolumeFromSnapshotResponse, __MetadataBearer {} + +/** + *

                                          Returns an Amazon FSx for OpenZFS volume to the state saved by the specified + * snapshot.
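A sketch of the restore call; the IDs are placeholders, and the Options value shown (discarding snapshots taken after the restore point) is an assumed option name.

```ts
import { FSxClient, RestoreVolumeFromSnapshotCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

const response = await client.send(
  new RestoreVolumeFromSnapshotCommand({
    VolumeId: "fsvol-0123456789abcdef0",        // placeholder volume ID
    SnapshotId: "fsvolsnap-0123456789abcdef0",  // placeholder snapshot ID
    Options: ["DELETE_INTERMEDIATE_SNAPSHOTS"], // assumed option name
  })
);
```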

                                          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, RestoreVolumeFromSnapshotCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, RestoreVolumeFromSnapshotCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new RestoreVolumeFromSnapshotCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link RestoreVolumeFromSnapshotCommandInput} for command's `input` shape. + * @see {@link RestoreVolumeFromSnapshotCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class RestoreVolumeFromSnapshotCommand extends $Command< + RestoreVolumeFromSnapshotCommandInput, + RestoreVolumeFromSnapshotCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: RestoreVolumeFromSnapshotCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "RestoreVolumeFromSnapshotCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: RestoreVolumeFromSnapshotRequest.filterSensitiveLog, + outputFilterSensitiveLog: RestoreVolumeFromSnapshotResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: RestoreVolumeFromSnapshotCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1RestoreVolumeFromSnapshotCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1RestoreVolumeFromSnapshotCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/UpdateDataRepositoryAssociationCommand.ts b/clients/client-fsx/src/commands/UpdateDataRepositoryAssociationCommand.ts new file mode 100644 index 000000000000..8c45fb1d2ffe --- /dev/null +++ b/clients/client-fsx/src/commands/UpdateDataRepositoryAssociationCommand.ts @@ -0,0 +1,105 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient"; +import { 
UpdateDataRepositoryAssociationRequest, UpdateDataRepositoryAssociationResponse } from "../models/models_0"; +import { + deserializeAws_json1_1UpdateDataRepositoryAssociationCommand, + serializeAws_json1_1UpdateDataRepositoryAssociationCommand, +} from "../protocols/Aws_json1_1"; + +export interface UpdateDataRepositoryAssociationCommandInput extends UpdateDataRepositoryAssociationRequest {} +export interface UpdateDataRepositoryAssociationCommandOutput + extends UpdateDataRepositoryAssociationResponse, + __MetadataBearer {} + +/** + *

                                          Updates the configuration of an existing data repository association + * on an Amazon FSx for Lustre file system. Data repository associations are + * supported only for file systems with the Persistent_2 deployment type.

                                          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { FSxClient, UpdateDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx"; // ES Modules import + * // const { FSxClient, UpdateDataRepositoryAssociationCommand } = require("@aws-sdk/client-fsx"); // CommonJS import + * const client = new FSxClient(config); + * const command = new UpdateDataRepositoryAssociationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateDataRepositoryAssociationCommandInput} for command's `input` shape. + * @see {@link UpdateDataRepositoryAssociationCommandOutput} for command's `response` shape. + * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape. + * + */ +export class UpdateDataRepositoryAssociationCommand extends $Command< + UpdateDataRepositoryAssociationCommandInput, + UpdateDataRepositoryAssociationCommandOutput, + FSxClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateDataRepositoryAssociationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: FSxClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "FSxClient"; + const commandName = "UpdateDataRepositoryAssociationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateDataRepositoryAssociationRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateDataRepositoryAssociationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: UpdateDataRepositoryAssociationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_json1_1UpdateDataRepositoryAssociationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_json1_1UpdateDataRepositoryAssociationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-fsx/src/commands/UpdateFileSystemCommand.ts b/clients/client-fsx/src/commands/UpdateFileSystemCommand.ts index d7f9710054af..a7e6e1058a9d 100644 --- a/clients/client-fsx/src/commands/UpdateFileSystemCommand.ts +++ b/clients/client-fsx/src/commands/UpdateFileSystemCommand.ts @@ -22,69 +22,142 @@ export interface UpdateFileSystemCommandInput extends UpdateFileSystemRequest {} export interface UpdateFileSystemCommandOutput extends UpdateFileSystemResponse, __MetadataBearer {} /** - *

                                          Use this operation to update the configuration of an existing Amazon FSx file system. - * You can update multiple properties in a single request.

                                          + *

                                          Use this operation to update the configuration of an existing Amazon FSx file + * system. You can update multiple properties in a single request.

                                          *

                                          For Amazon FSx for Windows File Server file systems, you can update the following - * properties:

                                          + * properties:

                                          *
                                            *
                                          • - *

                                            AuditLogConfiguration

                                            + *

                                            + * AuditLogConfiguration + *

                                            *
                                          • *
                                          • - *

                                            AutomaticBackupRetentionDays

                                            + *

                                            + * AutomaticBackupRetentionDays + *

                                            *
                                          • *
                                          • - *

                                            DailyAutomaticBackupStartTime

                                            + *

                                            + * DailyAutomaticBackupStartTime + *

                                            *
                                          • *
                                          • - *

                                            SelfManagedActiveDirectoryConfiguration

                                            + *

                                            + * SelfManagedActiveDirectoryConfiguration + *

                                            *
                                          • *
                                          • - *

                                            StorageCapacity

                                            + *

+ *                StorageCapacity
- *                ThroughputCapacity
+ *                ThroughputCapacity
- *                WeeklyMaintenanceStartTime
+ *                WeeklyMaintenanceStartTime
- * For Amazon FSx for Lustre file systems, you can update the following
+ * For FSx for Lustre file systems, you can update the following
  * properties:
- *                AutoImportPolicy
+ *                AutoImportPolicy
+ *                AutomaticBackupRetentionDays
+ *                DailyAutomaticBackupStartTime
+ *                DataCompressionType
+ *                StorageCapacity
- *                AutomaticBackupRetentionDays
+ *                WeeklyMaintenanceStartTime
+ * For FSx for ONTAP file systems, you can update the following
+ * properties:
- *                DailyAutomaticBackupStartTime
+ *                AutomaticBackupRetentionDays
- *                DataCompressionType
+ *                DailyAutomaticBackupStartTime
- *                StorageCapacity
+ *                FsxAdminPassword
- *                WeeklyMaintenanceStartTime
+ *                WeeklyMaintenanceStartTime
- * For Amazon FSx for NetApp ONTAP file systems, you can update the following
+ * For the Amazon FSx for OpenZFS file systems, you can update the following
  * properties:
- *                AutomaticBackupRetentionDays
+ *                AutomaticBackupRetentionDays
+ *                CopyTagsToBackups
+ *                CopyTagsToVolumes
+ *                DailyAutomaticBackupStartTime
- *                DailyAutomaticBackupStartTime
+ *                DiskIopsConfiguration
- *                FsxAdminPassword
+ *                ThroughputCapacity
- *                WeeklyMaintenanceStartTime
+ *                WeeklyMaintenanceStartTime
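As a rough illustration of the hunk above, a minimal sketch of updating one of these properties through the generated client might look like the following. The exact request fields (here `OpenZFSConfiguration.ThroughputCapacity` and the file system ID) are assumptions to be checked against the generated `UpdateFileSystemRequest` in `models_0.ts`, not definitions taken from this patch.

```ts
import { FSxClient, UpdateFileSystemCommand } from "@aws-sdk/client-fsx";

// Sketch only: bump ThroughputCapacity on an existing file system.
// The nested field names are assumptions to verify against models_0.ts.
const client = new FSxClient({ region: "us-east-1" });

const response = await client.send(
  new UpdateFileSystemCommand({
    FileSystemId: "fs-0123456789abcdef0", // placeholder ID
    OpenZFSConfiguration: {
      ThroughputCapacity: 128, // MBps
    },
  })
);
console.log(response.FileSystem?.FileSystemId);
```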
 * @example
diff --git a/clients/client-fsx/src/commands/UpdateSnapshotCommand.ts b/clients/client-fsx/src/commands/UpdateSnapshotCommand.ts
new file mode 100644
index 000000000000..7fbaeadccbf1
--- /dev/null
+++ b/clients/client-fsx/src/commands/UpdateSnapshotCommand.ts
@@ -0,0 +1,95 @@
+import { getSerdePlugin } from "@aws-sdk/middleware-serde";
+import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
+import { Command as $Command } from "@aws-sdk/smithy-client";
+import {
+  FinalizeHandlerArguments,
+  Handler,
+  HandlerExecutionContext,
+  HttpHandlerOptions as __HttpHandlerOptions,
+  MetadataBearer as __MetadataBearer,
+  MiddlewareStack,
+  SerdeContext as __SerdeContext,
+} from "@aws-sdk/types";
+
+import { FSxClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../FSxClient";
+import { UpdateSnapshotRequest, UpdateSnapshotResponse } from "../models/models_0";
+import {
+  deserializeAws_json1_1UpdateSnapshotCommand,
+  serializeAws_json1_1UpdateSnapshotCommand,
+} from "../protocols/Aws_json1_1";
+
+export interface UpdateSnapshotCommandInput extends UpdateSnapshotRequest {}
+export interface UpdateSnapshotCommandOutput extends UpdateSnapshotResponse, __MetadataBearer {}
+
+/**
+ * Updates the name of a snapshot.
+ * @example
+ * Use a bare-bones client and the command you need to make an API call.
+ * ```javascript
+ * import { FSxClient, UpdateSnapshotCommand } from "@aws-sdk/client-fsx"; // ES Modules import
+ * // const { FSxClient, UpdateSnapshotCommand } = require("@aws-sdk/client-fsx"); // CommonJS import
+ * const client = new FSxClient(config);
+ * const command = new UpdateSnapshotCommand(input);
+ * const response = await client.send(command);
+ * ```
+ *
+ * @see {@link UpdateSnapshotCommandInput} for command's `input` shape.
+ * @see {@link UpdateSnapshotCommandOutput} for command's `response` shape.
+ * @see {@link FSxClientResolvedConfig | config} for FSxClient's `config` shape.
+ *
+ */
+export class UpdateSnapshotCommand extends $Command<
+  UpdateSnapshotCommandInput,
+  UpdateSnapshotCommandOutput,
+  FSxClientResolvedConfig
+> {
+  // Start section: command_properties
+  // End section: command_properties
+
+  constructor(readonly input: UpdateSnapshotCommandInput) {
+    // Start section: command_constructor
+    super();
+    // End section: command_constructor
+  }
+
+  /**
+   * @internal
+   */
+  resolveMiddleware(
+    clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
+    configuration: FSxClientResolvedConfig,
+    options?: __HttpHandlerOptions
+  ): Handler<UpdateSnapshotCommandInput, UpdateSnapshotCommandOutput> {
+    this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize));
+
+    const stack = clientStack.concat(this.middlewareStack);
+
+    const { logger } = configuration;
+    const clientName = "FSxClient";
+    const commandName = "UpdateSnapshotCommand";
+    const handlerExecutionContext: HandlerExecutionContext = {
+      logger,
+      clientName,
+      commandName,
+      inputFilterSensitiveLog: UpdateSnapshotRequest.filterSensitiveLog,
+      outputFilterSensitiveLog: UpdateSnapshotResponse.filterSensitiveLog,
+    };
+    const { requestHandler } = configuration;
+    return stack.resolve(
+      (request: FinalizeHandlerArguments<any>) =>
+        requestHandler.handle(request.request as __HttpRequest, options || {}),
+      handlerExecutionContext
+    );
+  }
+
+  private serialize(input: UpdateSnapshotCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
+    return serializeAws_json1_1UpdateSnapshotCommand(input, context);
+  }
+
+  private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<UpdateSnapshotCommandOutput> {
+    return deserializeAws_json1_1UpdateSnapshotCommand(output, context);
+  }
+
+  // Start section: command_body_extra
+  // End section: command_body_extra
+}
diff --git a/clients/client-fsx/src/commands/UpdateVolumeCommand.ts b/clients/client-fsx/src/commands/UpdateVolumeCommand.ts
index 4dab3ba6866e..8232e9d127dd 100644
--- a/clients/client-fsx/src/commands/UpdateVolumeCommand.ts
+++ b/clients/client-fsx/src/commands/UpdateVolumeCommand.ts
@@ -22,7 +22,7 @@ export interface UpdateVolumeCommandInput extends UpdateVolumeRequest {}
 export interface UpdateVolumeCommandOutput extends UpdateVolumeResponse, __MetadataBearer {}
 
 /**
- *

                                          Updates an Amazon FSx for NetApp ONTAP volume's configuration.

                                          + *

                                          Updates the configuration of an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume.

                                          * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-fsx/src/commands/index.ts b/clients/client-fsx/src/commands/index.ts index 586affd26177..65d10312baa1 100644 --- a/clients/client-fsx/src/commands/index.ts +++ b/clients/client-fsx/src/commands/index.ts @@ -2,26 +2,36 @@ export * from "./AssociateFileSystemAliasesCommand"; export * from "./CancelDataRepositoryTaskCommand"; export * from "./CopyBackupCommand"; export * from "./CreateBackupCommand"; +export * from "./CreateDataRepositoryAssociationCommand"; export * from "./CreateDataRepositoryTaskCommand"; export * from "./CreateFileSystemCommand"; export * from "./CreateFileSystemFromBackupCommand"; +export * from "./CreateSnapshotCommand"; export * from "./CreateStorageVirtualMachineCommand"; export * from "./CreateVolumeCommand"; export * from "./CreateVolumeFromBackupCommand"; export * from "./DeleteBackupCommand"; +export * from "./DeleteDataRepositoryAssociationCommand"; export * from "./DeleteFileSystemCommand"; +export * from "./DeleteSnapshotCommand"; export * from "./DeleteStorageVirtualMachineCommand"; export * from "./DeleteVolumeCommand"; export * from "./DescribeBackupsCommand"; +export * from "./DescribeDataRepositoryAssociationsCommand"; export * from "./DescribeDataRepositoryTasksCommand"; export * from "./DescribeFileSystemAliasesCommand"; export * from "./DescribeFileSystemsCommand"; +export * from "./DescribeSnapshotsCommand"; export * from "./DescribeStorageVirtualMachinesCommand"; export * from "./DescribeVolumesCommand"; export * from "./DisassociateFileSystemAliasesCommand"; export * from "./ListTagsForResourceCommand"; +export * from "./ReleaseFileSystemNfsV3LocksCommand"; +export * from "./RestoreVolumeFromSnapshotCommand"; export * from "./TagResourceCommand"; export * from "./UntagResourceCommand"; +export * from "./UpdateDataRepositoryAssociationCommand"; export * from "./UpdateFileSystemCommand"; +export * from "./UpdateSnapshotCommand"; export * from "./UpdateStorageVirtualMachineCommand"; export * from "./UpdateVolumeCommand"; diff --git a/clients/client-fsx/src/models/models_0.ts b/clients/client-fsx/src/models/models_0.ts index 72ee5d84fc5e..400cbb8ef4b0 100644 --- a/clients/client-fsx/src/models/models_0.ts +++ b/clients/client-fsx/src/models/models_0.ts @@ -2,11 +2,12 @@ import { SENSITIVE_STRING } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; /** - *

                                          The Microsoft AD attributes of the Amazon FSx for Windows File Server file system.

                                          + *

                                          The Microsoft Active Directory attributes of the Amazon FSx for Windows File + * Server file system.

                                          */ export interface ActiveDirectoryBackupAttributes { /** - *

                                          The fully qualified domain name of the self-managed AD directory.

                                          + *

                                          The fully qualified domain name of the self-managed Active Directory directory.

                                          */ DomainName?: string; @@ -75,7 +76,10 @@ export enum AdministrativeActionType { FILE_SYSTEM_ALIAS_ASSOCIATION = "FILE_SYSTEM_ALIAS_ASSOCIATION", FILE_SYSTEM_ALIAS_DISASSOCIATION = "FILE_SYSTEM_ALIAS_DISASSOCIATION", FILE_SYSTEM_UPDATE = "FILE_SYSTEM_UPDATE", + RELEASE_NFS_V3_LOCKS = "RELEASE_NFS_V3_LOCKS", + SNAPSHOT_UPDATE = "SNAPSHOT_UPDATE", STORAGE_OPTIMIZATION = "STORAGE_OPTIMIZATION", + VOLUME_UPDATE = "VOLUME_UPDATE", } /** @@ -106,8 +110,8 @@ export enum Status { } /** - *

                                          A structure providing details of any failures that occur when creating the file system - * has failed.

                                          + *

                                          A structure providing details of any failures that occurred when creating a file + * system.

                                          */ export interface FileSystemFailureDetails { /** @@ -128,6 +132,7 @@ export namespace FileSystemFailureDetails { export enum FileSystemType { LUSTRE = "LUSTRE", ONTAP = "ONTAP", + OPENZFS = "OPENZFS", WINDOWS = "WINDOWS", } @@ -148,12 +153,13 @@ export enum DataCompressionType { export enum AutoImportPolicyType { NEW = "NEW", NEW_CHANGED = "NEW_CHANGED", + NEW_CHANGED_DELETED = "NEW_CHANGED_DELETED", NONE = "NONE", } /** *

                                          Provides detailed information about the data respository if its Lifecycle is - * set to MISCONFIGURED.

                                          + * set to MISCONFIGURED or FAILED.

                                          */ export interface DataRepositoryFailureDetails { /** @@ -175,6 +181,7 @@ export enum DataRepositoryLifecycle { AVAILABLE = "AVAILABLE", CREATING = "CREATING", DELETING = "DELETING", + FAILED = "FAILED", MISCONFIGURED = "MISCONFIGURED", UPDATING = "UPDATING", } @@ -182,6 +189,8 @@ export enum DataRepositoryLifecycle { /** *

                                          The data repository configuration object for Lustre file systems returned in the response of * the CreateFileSystem operation.

                                          + *

                                          This data type is not supported for file systems with the Persistent_2 deployment type. + * Instead, use .

                                          */ export interface DataRepositoryConfiguration { /** @@ -207,7 +216,13 @@ export interface DataRepositoryConfiguration { *
                                        • *
                                        • *

                                          - * UPDATING - The data repository is undergoing a customer initiated update and availability may be impacted.

                                          + * UPDATING - The data repository is undergoing a customer + * initiated update and availability may be impacted.

                                          + *
                                        • + *
                                        • + *

                                          + * FAILED - The data repository is in a terminal state that + * cannot be recovered.

                                          *
                                        • *
                                        */ @@ -261,17 +276,22 @@ export interface DataRepositoryConfiguration { *

                                        * NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports * file and directory listings of any new objects added to the S3 bucket and any - * existing objects that are changed in the S3 bucket after you choose this option. - *

                                        + * existing objects that are changed in the S3 bucket after you choose this option.

                                        + *
                                      • + *
                                      • + *

                                        + * NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically + * imports file and directory listings of any new objects added to the S3 bucket, any + * existing objects that are changed in the S3 bucket, and any objects that were deleted + * in the S3 bucket.

                                        *
                                      • *
                                      - *

                                      For more information, see Automatically import updates from your S3 bucket.

                                      */ AutoImportPolicy?: AutoImportPolicyType | string; /** *

                                      Provides detailed information about the data respository if its Lifecycle is - * set to MISCONFIGURED.

                                      + * set to MISCONFIGURED or FAILED.

                                      */ FailureDetails?: DataRepositoryFailureDetails; } @@ -287,6 +307,7 @@ export namespace DataRepositoryConfiguration { export enum LustreDeploymentType { PERSISTENT_1 = "PERSISTENT_1", + PERSISTENT_2 = "PERSISTENT_2", SCRATCH_1 = "SCRATCH_1", SCRATCH_2 = "SCRATCH_2", } @@ -296,51 +317,129 @@ export enum DriveCacheType { READ = "READ", } +export enum LustreAccessAuditLogLevel { + DISABLED = "DISABLED", + ERROR_ONLY = "ERROR_ONLY", + WARN_ERROR = "WARN_ERROR", + WARN_ONLY = "WARN_ONLY", +} + +/** + *

                                      The configuration for Lustre logging used to write the enabled + * logging events for your file system to Amazon CloudWatch Logs.

                                      + *

                                      When logging is enabled, Lustre logs error and warning events + * from data repository operations such as automatic export and data repository tasks. + * To learn more about Lustre logging, see + * Logging with Amazon CloudWatch Logs. + *

                                      + */ +export interface LustreLogConfiguration { + /** + *

                                      The data repository events that are logged by Amazon FSx.

                                      + *
                                        + *
                                      • + *

                                        + * WARN_ONLY - only warning events are logged.

                                        + *
                                      • + *
                                      • + *

                                        + * ERROR_ONLY - only error events are logged.

                                        + *
                                      • + *
                                      • + *

                                        + * WARN_ERROR - both warning events and error events + * are logged.

                                        + *
                                      • + *
                                      • + *

                                        + * DISABLED - logging of data repository events + * is turned off.

                                        + *
                                      • + *
                                      + */ + Level: LustreAccessAuditLogLevel | string | undefined; + + /** + *

                                      The Amazon Resource Name (ARN) that specifies the destination of the logs. + * The destination can be any Amazon CloudWatch Logs log group ARN. The destination + * ARN must be in the same Amazon Web Services partition, Amazon Web Services Region, + * and Amazon Web Services account as your Amazon FSx file system.
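Putting the two fields above together, a hedged sketch of a `LustreLogConfiguration` value might look like this; the log-group ARN is a placeholder, and the type import assumes the model interfaces are re-exported from the package root as they are for the other generated clients.

```ts
import type { LustreLogConfiguration } from "@aws-sdk/client-fsx";

// Example shape only: WARN_ERROR captures both warning and error events,
// and Destination must be a CloudWatch Logs log group ARN in the same
// partition, Region, and account as the file system.
const logConfig: LustreLogConfiguration = {
  Level: "WARN_ERROR",
  Destination: "arn:aws:logs:us-east-1:111122223333:log-group:/aws/fsx/lustre", // placeholder ARN
};
```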

                                      + */ + Destination?: string; +} + +export namespace LustreLogConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LustreLogConfiguration): any => ({ + ...obj, + }); +} + /** *

                                      The configuration for the Amazon FSx for Lustre file system.

                                      */ export interface LustreFileSystemConfiguration { /** *

                                      The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC - * time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

                                      + * time zone. Here, d is the weekday number, from 1 through 7, beginning with Monday and + * ending with Sunday.

                                      */ WeeklyMaintenanceStartTime?: string; /** *

                                      The data repository configuration object for Lustre file systems returned in the response of * the CreateFileSystem operation.

                                      + *

                                      This data type is not supported for file systems with the Persistent_2 deployment type. + * Instead, use .

                                      */ DataRepositoryConfiguration?: DataRepositoryConfiguration; /** - *

                                      The deployment type of the FSX for Lustre file system. Scratch deployment type is designed for temporary storage + *

                                      The deployment type of the FSx for Lustre file system. + * Scratch deployment type is designed for temporary storage * and shorter-term processing of data.

                                      *

                                      - * SCRATCH_1 and SCRATCH_2 deployment - * types are best suited for when you need temporary storage and shorter-term processing of data. - * The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst - * throughput capacity than SCRATCH_1.

                                      - *

                                      The PERSISTENT_1 deployment type is used for longer-term storage - * and workloads and encryption of data in transit. To learn more about deployment types, see - * - * FSx for Lustre Deployment Options. (Default = SCRATCH_1)

                                      + * SCRATCH_1 and SCRATCH_2 deployment types are best suited + * for when you need temporary storage and shorter-term processing of data. The + * SCRATCH_2 deployment type provides in-transit encryption of data and higher burst + * throughput capacity than SCRATCH_1.

                                      + *

                                      The PERSISTENT_1 and PERSISTENT_2 deployment type is used + * for longer-term storage and workloads and encryption of data in transit. + * PERSISTENT_2 is built on Lustre v2.12 and offers higher + * PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower + * minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see + * FSx for Lustre deployment options.

                                      + *

                                      The default is SCRATCH_1.

                                      */ DeploymentType?: LustreDeploymentType | string; /** - *

                                      Per unit storage throughput represents the megabytes per second of read or write + *

                                      Per unit storage throughput represents the megabytes per second of read or write * throughput per 1 tebibyte of storage provisioned. File system throughput capacity is * equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). This option is - * only valid for PERSISTENT_1 deployment types.

                                      - *

                                      Valid values for SSD storage: 50, 100, 200. Valid values for HDD storage: 12, 40.

                                      + * only valid for PERSISTENT_1 and PERSISTENT_2 deployment types.

                                      + *

                                      Valid values:

                                      + *
                                        + *
                                      • + *

                                        For PERSISTENT_1 SSD storage: 50, 100, 200.

                                        + *
                                      • + *
                                      • + *

                                        For PERSISTENT_1 HDD storage: 12, 40.

                                        + *
                                      • + *
                                      • + *

                                        For PERSISTENT_2 SSD storage: 125, 250, 500, 1000.

                                        + *
                                      • + *
                                      */ PerUnitStorageThroughput?: number; /** *

                                      You use the MountName value when mounting the file system.

                                      *

                                      For the SCRATCH_1 deployment type, this value is always "fsx". - * For SCRATCH_2 and PERSISTENT_1 deployment types, this - * value is a string that is unique within an Amazon Web Services Region. + * For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 deployment + * types, this value is a string that is unique within an Amazon Web Services Region. * *

                                      */ @@ -354,13 +453,14 @@ export interface LustreFileSystemConfiguration { DailyAutomaticBackupStartTime?: string; /** - *

                                      The number of days to retain automatic backups. Setting this to 0 disables - * automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.

                                      + *

                                      The number of days to retain automatic backups. Setting this property to + * 0 disables automatic backups. You can retain automatic backups for a + * maximum of 90 days. The default is 0.

                                      */ AutomaticBackupRetentionDays?: number; /** - *

                                      A boolean flag indicating whether tags on the file system should be copied to backups. + *

                                      A boolean flag indicating whether tags on the file system are copied to backups. * If it's set to true, all tags on the file system are * copied to all automatic backups and any user-initiated backups where the user * doesn't specify any tags. If this value is true, and you specify one or more tags, @@ -371,10 +471,11 @@ export interface LustreFileSystemConfiguration { CopyTagsToBackups?: boolean; /** - *

                                      The type of drive cache used by PERSISTENT_1 file systems that are provisioned with - * HDD storage devices. This parameter is required when storage type is HDD. Set to - * READ, improve the performance for frequently accessed files and allows 20% - * of the total storage capacity of the file system to be cached.

                                      + *

                                      The type of drive cache used by PERSISTENT_1 file systems that are provisioned with + * HDD storage devices. This parameter is required when StorageType is HDD. When set to + * READ the file system has an SSD storage cache that is sized to 20% of the file system's + * storage capacity. This improves the performance for frequently accessed files by caching up to 20% + * of the total storage capacity.

                                      *

                                      This parameter is required when StorageType is set to HDD.

                                      */ DriveCacheType?: DriveCacheType | string; @@ -397,6 +498,12 @@ export interface LustreFileSystemConfiguration { *

                                      For more information, see Lustre data compression.

                                      */ DataCompressionType?: DataCompressionType | string; + + /** + *

                                      The Lustre logging configuration. Lustre logging writes the enabled log + * events for your file system to Amazon CloudWatch Logs.

                                      + */ + LogConfiguration?: LustreLogConfiguration; } export namespace LustreFileSystemConfiguration { @@ -418,12 +525,10 @@ export enum DiskIopsConfigurationMode { } /** - *

                                      The SSD IOPS (input/output operations per second) configuration - * for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS - * per GB of storage capacity, but you can provision additional IOPS - * per GB of storage. The configuration consists of the total number - * of provisioned SSD IOPS and how the amount was provisioned - * (by the customer or by the system).

                                      + *

                                      The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The + * default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per + * GB of storage. The configuration consists of the total number of provisioned SSD IOPS + * and how the amount was provisioned (by the customer or by the system).

                                      */ export interface DiskIopsConfiguration { /** @@ -509,8 +614,9 @@ export namespace FileSystemEndpoints { */ export interface OntapFileSystemConfiguration { /** - *

                                      The number of days to retain automatic backups. Setting this to 0 disables - * automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.

                                      + *

                                      The number of days to retain automatic backups. Setting this property to + * 0 disables automatic backups. You can retain automatic backups for a + * maximum of 90 days. The default is 0.

                                      */ AutomaticBackupRetentionDays?: number; @@ -547,7 +653,7 @@ export interface OntapFileSystemConfiguration { /** *

                                      The ID for a subnet. A subnet is a range of IP addresses in - * your virtual private cloud (VPC). For more information, see VPC and Subnets in the + * your virtual private cloud (VPC). For more information, see VPC and subnets in the * Amazon VPC User Guide. *

                                      */ @@ -560,7 +666,7 @@ export interface OntapFileSystemConfiguration { RouteTableIds?: string[]; /** - *

                                      Sustained throughput of an Amazon FSx file system in MBps.

                                      + *

                                      The sustained throughput of an Amazon FSx file system in MBps.

                                      */ ThroughputCapacity?: number; @@ -586,6 +692,98 @@ export namespace OntapFileSystemConfiguration { }); } +export enum OpenZFSDeploymentType { + SINGLE_AZ_1 = "SINGLE_AZ_1", +} + +/** + *

                                      The configuration for the Amazon FSx for OpenZFS file system.

                                      + */ +export interface OpenZFSFileSystemConfiguration { + /** + *

                                      The number of days to retain automatic backups. Setting this property to + * 0 disables automatic backups. You can retain automatic backups for a + * maximum of 90 days. The default is 0.

                                      + */ + AutomaticBackupRetentionDays?: number; + + /** + *

                                      A Boolean value indicating whether tags on the file system should be copied to + * backups. + * If it's set to true, all tags on the file system are copied to all + * automatic backups and any user-initiated backups where the user doesn't specify any + * tags. If this value is true and you specify one or more tags, only the + * specified tags are copied to backups. If you specify one or more tags when creating a + * user-initiated backup, no tags are copied from the file system, regardless of this + * value.

                                      + */ + CopyTagsToBackups?: boolean; + + /** + *

                                      A Boolean value indicating whether tags for the volume should be copied to snapshots. + * This value defaults to false. If it's set to true, all tags + * for the volume are copied to snapshots where the user doesn't specify tags. If this + * value is true and you specify one or more tags, only the specified tags are + * copied to snapshots. If you specify one or more tags when creating the snapshot, no tags + * are copied from the volume, regardless of this value.

                                      + */ + CopyTagsToVolumes?: boolean; + + /** + *

                                      A recurring daily time, in the format HH:MM. HH is the + * zero-padded hour of the day (0-23), and MM is the zero-padded minute of the + * hour. For example, 05:00 specifies 5 AM daily.

                                      + */ + DailyAutomaticBackupStartTime?: string; + + /** + *

                                      Specifies the file-system deployment type. Amazon FSx for OpenZFS supports + * SINGLE_AZ_1. SINGLE_AZ_1 is a file system configured for a + * single Availability Zone (AZ) of redundancy.

                                      + */ + DeploymentType?: OpenZFSDeploymentType | string; + + /** + *

                                      The throughput of an Amazon FSx file system, measured in megabytes per second + * (MBps), in 2 to the nth increments, between 2^3 (8) and 2^11 (2048).

                                      + */ + ThroughputCapacity?: number; + + /** + *

                                      A recurring weekly time, in the format D:HH:MM.

                                      + *

                                      + * D is the day of the week, for which 1 represents Monday and 7 + * represents Sunday. For further details, see the ISO-8601 spec as described on Wikipedia.

                                      + *

                                      + * HH is the zero-padded hour of the day (0-23), and MM is + * the zero-padded minute of the hour.

                                      + *

                                      For example, 1:05:00 specifies maintenance at 5 AM Monday.
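A tiny helper (an illustrative sketch, not part of the SDK) that builds a weekly maintenance start time in this D:HH:MM format:

```ts
// Build a "D:HH:MM" string where D is 1 (Monday) through 7 (Sunday).
function weeklyMaintenanceStartTime(day: number, hour: number, minute: number): string {
  if (day < 1 || day > 7) throw new Error("day must be 1 (Monday) through 7 (Sunday)");
  const hh = String(hour).padStart(2, "0");
  const mm = String(minute).padStart(2, "0");
  return `${day}:${hh}:${mm}`;
}

weeklyMaintenanceStartTime(1, 5, 0); // "1:05:00" -> 5 AM Monday
```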

                                      + */ + WeeklyMaintenanceStartTime?: string; + + /** + *

                                      The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The + * default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per + * GB of storage. The configuration consists of the total number of provisioned SSD IOPS + * and how the amount was provisioned (by the customer or by the system).

                                      + */ + DiskIopsConfiguration?: DiskIopsConfiguration; + + /** + *

                                      The ID of the root volume of the OpenZFS file system.

                                      + */ + RootVolumeId?: string; +} + +export namespace OpenZFSFileSystemConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OpenZFSFileSystemConfiguration): any => ({ + ...obj, + }); +} + export enum StorageType { HDD = "HDD", SSD = "SSD", @@ -970,7 +1168,15 @@ export namespace WindowsFileSystemConfiguration { }); } +export enum SnapshotLifecycle { + AVAILABLE = "AVAILABLE", + CREATING = "CREATING", + DELETING = "DELETING", + PENDING = "PENDING", +} + export enum VolumeLifecycle { + AVAILABLE = "AVAILABLE", CREATED = "CREATED", CREATING = "CREATING", DELETING = "DELETING", @@ -1074,7 +1280,7 @@ export namespace TieringPolicy { } /** - *

                                      The configuration of an Amazon FSx for NetApp ONTAP volume

                                      + *

                                      The configuration of an Amazon FSx for NetApp ONTAP volume.

                                      */ export interface OntapVolumeConfiguration { /** @@ -1099,16 +1305,19 @@ export interface OntapVolumeConfiguration { FlexCacheEndpointType?: FlexCacheEndpointType | string; /** - *

                                      Specifies the directory that NAS clients use to mount the volume, along with the SVM DNS name or IP address. - * You can create a JunctionPath directly below a parent volume junction or on a - * directory within a volume. A JunctionPath for a volume named vol3 might - * be /vol1/vol2/vol3, or /vol1/dir2/vol3, or even /dir1/dir2/vol3..

                                      + *

                                      Specifies the directory that network-attached storage (NAS) clients use to mount the + * volume, along with the storage virtual machine (SVM) Domain Name System (DNS) name or IP + * address. You can create a JunctionPath directly below a parent volume + * junction or on a directory within a volume. A JunctionPath for a volume + * named vol3 might be /vol1/vol2/vol3, or + * /vol1/dir2/vol3, or even /dir1/dir2/vol3.

                                      */ JunctionPath?: string; /** - *

                                      The security style for the volume, which can be UNIX, - * NTFS, or MIXED.

                                      + *

                                      The security style for the volume, which can be UNIX, NTFS, + * or + * MIXED.

                                      */ SecurityStyle?: SecurityStyle | string; @@ -1128,13 +1337,13 @@ export interface OntapVolumeConfiguration { StorageVirtualMachineId?: string; /** - *

                                      A boolean flag indicating whether this volume is the root volume for - * its storage virtual machine (SVM). Only one volume on an SVM can be the - * root volume. This value defaults to false. If this value is true, then - * this is the SVM root volume.

                                      - *

                                      This flag is useful when you're deleting an SVM, because you must - * first delete all non-root volumes. This flag, when set to false, helps - * you identify which volumes to delete before you can delete the SVM.

                                      + *

                                      A Boolean flag indicating whether this volume is the root volume for its storage + * virtual machine (SVM). Only one volume on an SVM can be the root volume. This value + * defaults to false. If this value is true, then this is the SVM + * root volume.

                                      + *

                                      This flag is useful when you're deleting an SVM, because you must first delete all + * non-root volumes. This flag, when set to false, helps you identify which + * volumes to delete before you can delete the SVM.

                                      */ StorageVirtualMachineRoot?: boolean; @@ -1144,7 +1353,7 @@ export interface OntapVolumeConfiguration { TieringPolicy?: TieringPolicy; /** - *

                                      The volume's UUID (universally unique identifier).

                                      + *

                                      The volume's universally unique identifier (UUID).

                                      */ UUID?: string; @@ -1153,20 +1362,19 @@ export interface OntapVolumeConfiguration { *
                                        *
                                      • *

                                        - * RW specifies a read-write volume. - * RW is the default.

                                        + * RW specifies a read/write volume. RW is the default.

                                        *
                                      • *
                                      • *

                                        - * DP specifies a data protection volume. You can - * protect data by replicating it to data protection mirror copies and use - * data protection mirror copies to recover data when a disaster occurs.

                                        + * DP specifies a data-protection volume. You can protect data by replicating it to + * data-protection mirror copies. If a disaster occurs, you can use these + * data-protection mirror copies to recover data.

                                        *
                                      • *
                                      • *

                                        - * LS specifies a load-sharing mirror volume. - * A load-sharing mirror reduces the network traffic to a FlexVol volume - * by providing additional read-only access to clients.

                                        + * LS specifies a load-sharing mirror volume. A load-sharing mirror reduces the + * network traffic to a FlexVol volume by providing additional read-only access to + * clients.

                                        *
                                      • *
                                      */ @@ -1182,166 +1390,313 @@ export namespace OntapVolumeConfiguration { }); } -export enum VolumeType { - ONTAP = "ONTAP", +export enum OpenZFSDataCompressionType { + NONE = "NONE", + ZSTD = "ZSTD", } /** - *

                                      Describes an Amazon FSx for NetApp ONTAP volume.

                                      + *

                                      Specifies who can mount the file system and the options that can be used while + * mounting the file system.

                                      */ -export interface Volume { - /** - *

                                      The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), - * also known as Unix time.

                                      - */ - CreationTime?: Date; - +export interface OpenZFSClientConfiguration { /** - *

                                      The globally unique ID of the file system, assigned by Amazon FSx.

                                      + *

A value that specifies who can mount the file system. You can provide a wildcard + * character (*), an IP address (0.0.0.0), or a CIDR address + * (192.0.2.0/24). By default, Amazon FSx uses the wildcard + * character when specifying the client.

                                      */ - FileSystemId?: string; + Clients: string | undefined; /** - *

                                      The lifecycle status of the volume.

                                      + *

                                      The options to use when mounting the file system. For a list of options that you can + * use with Network File System (NFS), see the exports(5) - Linux man page. When + * choosing your options, consider the following:

                                      *
                                        *
                                      • - *

                                        - * CREATED - The volume is fully available for use.

                                        - *
                                      • - *
                                      • - *

                                        - * CREATING - Amazon FSx is creating the new volume.

                                        - *
                                      • - *
                                      • - *

                                        - * DELETING - Amazon FSx is deleting an existing volume.

                                        - *
                                      • - *
                                      • - *

                                        - * FAILED - Amazon FSx was unable to create the volume.

                                        - *
                                      • - *
                                      • - *

                                        - * MISCONFIGURED - The volume is in a failed but recoverable state.

                                        + *

                                        + * crossmount is used by default. If you don't specify + * crossmount when changing the client configuration, you won't be + * able to see or access snapshots in your file system's snapshot directory.

                                        *
                                      • *
                                      • - *

                                        - * PENDING - Amazon FSx has not started creating the volume.

                                        + *

                                        + * sync is used by default. If you instead specify + * async, the system acknowledges writes before writing to disk. + * If the system crashes before the writes are finished, you lose the unwritten + * data.

                                        *
                                      • *
                                      */ - Lifecycle?: VolumeLifecycle | string; + Options: string[] | undefined; +} +export namespace OpenZFSClientConfiguration { /** - *

                                      The name of the volume.

                                      + * @internal */ - Name?: string; + export const filterSensitiveLog = (obj: OpenZFSClientConfiguration): any => ({ + ...obj, + }); +} +/** + *

The Network File System (NFS) configurations for mounting an Amazon FSx for + * OpenZFS file system.

                                      + */ +export interface OpenZFSNfsExport { /** - *

                                      The configuration of an Amazon FSx for NetApp ONTAP volume

                                      + *

                                      A list of configuration objects that contain the client and options for mounting the + * OpenZFS file system.
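For illustration, an NFS export with a single client configuration might look like the following sketch; the CIDR range and the option strings are placeholders (options follow exports(5)), and the type import assumes the model interfaces are re-exported from the package root.

```ts
import type { OpenZFSNfsExport } from "@aws-sdk/client-fsx";

// Sketch of an NFS export: one client range with read/write access.
const nfsExport: OpenZFSNfsExport = {
  ClientConfigurations: [
    {
      Clients: "10.0.1.0/24",      // who may mount the volume
      Options: ["rw", "crossmnt"], // mount options, as in exports(5)
    },
  ],
};
```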

                                      */ - OntapConfiguration?: OntapVolumeConfiguration; + ClientConfigurations: OpenZFSClientConfiguration[] | undefined; +} + +export namespace OpenZFSNfsExport { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OpenZFSNfsExport): any => ({ + ...obj, + }); +} + +export enum OpenZFSCopyStrategy { + CLONE = "CLONE", + FULL_COPY = "FULL_COPY", +} +/** + *

                                      The snapshot configuration to use when creating an OpenZFS volume from a + * snapshot.

                                      + */ +export interface OpenZFSOriginSnapshotConfiguration { /** *

                                      The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services * resources. We require an ARN when you need to specify a resource unambiguously across * all of Amazon Web Services. For more information, see Amazon Resource Names (ARNs) in * the Amazon Web Services General Reference.

                                      */ - ResourceARN?: string; + SnapshotARN?: string; /** - *

                                      A list of Tag values, with a maximum of 50 elements.

                                      + *

                                      The strategy used when copying data from the snapshot to the new volume.

                                      + *
                                        + *
                                      • + *

                                        + * CLONE - The new volume references the data in the origin + * snapshot. Cloning a snapshot is faster than copying the data from a snapshot to + * a new volume and doesn't consume disk throughput. However, the origin snapshot + * can't be deleted if there is a volume using its copied data.

                                        + *
                                      • + *
                                      • + *

                                        + * FULL_COPY - Copies all data from the snapshot to the new volume. + *

                                        + *
                                      • + *
                                      */ - Tags?: Tag[]; + CopyStrategy?: OpenZFSCopyStrategy | string; +} +export namespace OpenZFSOriginSnapshotConfiguration { /** - *

                                      The system-generated, unique ID of the volume.

                                      + * @internal */ - VolumeId?: string; + export const filterSensitiveLog = (obj: OpenZFSOriginSnapshotConfiguration): any => ({ + ...obj, + }); +} + +export enum OpenZFSQuotaType { + GROUP = "GROUP", + USER = "USER", +} +/** + *

                                      The configuration for how much storage a user or group can use on the volume.

                                      + */ +export interface OpenZFSUserOrGroupQuota { /** - *

                                      The type of volume; ONTAP is the only valid volume type.

                                      + *

                                      A value that specifies whether the quota applies to a user or group.

                                      */ - VolumeType?: VolumeType | string; + Type: OpenZFSQuotaType | string | undefined; /** - *

                                      Describes why the volume lifecycle state changed.

                                      + *

                                      The ID of the user or group.

                                      */ - LifecycleTransitionReason?: LifecycleTransitionReason; + Id: number | undefined; + + /** + *

                                      The amount of storage that the user or group can use in gibibytes (GiB).
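Put together, a quota entry is just these three fields; the user ID and size below are placeholders used for illustration.

```ts
import type { OpenZFSUserOrGroupQuota } from "@aws-sdk/client-fsx";

// Limit POSIX user 1001 to 100 GiB of storage on the volume.
const userQuota: OpenZFSUserOrGroupQuota = {
  Type: "USER",
  Id: 1001,
  StorageCapacityQuotaGiB: 100,
};
```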

                                      + */ + StorageCapacityQuotaGiB: number | undefined; } -export namespace Volume { +export namespace OpenZFSUserOrGroupQuota { /** * @internal */ - export const filterSensitiveLog = (obj: Volume): any => ({ + export const filterSensitiveLog = (obj: OpenZFSUserOrGroupQuota): any => ({ ...obj, }); } /** - *

                                      The request object specifying one or more DNS alias names to associate with an Amazon FSx for Windows File Server file system.

                                      + *

                                      The configuration of an Amazon FSx for OpenZFS volume.

                                      */ -export interface AssociateFileSystemAliasesRequest { +export interface OpenZFSVolumeConfiguration { /** - *

                                      (Optional) An idempotency token for resource creation, in a string of up to 64 - * ASCII characters. This token is automatically filled on your behalf when you use the - * Command Line Interface (CLI) or an Amazon Web Services SDK.

                                      + *

                                      The ID of the parent volume.

                                      */ - ClientRequestToken?: string; + ParentVolumeId?: string; /** - *

                                      Specifies the file system with which you want to associate one or more DNS aliases.

                                      + *

                                      The path to the volume from the root volume. For example, + * fsx/parentVolume/volume1.

                                      */ - FileSystemId: string | undefined; + VolumePath?: string; /** - *

                                      An array of one or more DNS alias names to associate with the file system. - * The alias name has to comply with the following formatting requirements:

                                      - *
                                        - *
                                      • - *

                                        Formatted as a fully-qualified domain name (FQDN), - * hostname.domain - * , - * for example, accounting.corp.example.com.

                                        - *
                                      • - *
                                      • - *

                                        Can contain alphanumeric characters and the hyphen (-).

                                        - *
                                      • + *

                                        The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't + * reserve more storage than the parent volume has reserved.

                                        + */ + StorageCapacityReservationGiB?: number; + + /** + *

                                        The maximum amount of storage in gibibtyes (GiB) that the volume can use from its + * parent. You can specify a quota larger than the storage on the parent volume.

                                        + */ + StorageCapacityQuotaGiB?: number; + + /** + *

                                        The method used to compress the data on the volume. Unless a compression type is + * specified, volumes inherit the DataCompressionType value of their parent + * volume.

                                        + *
                                          *
                                        • - *

                                          Cannot start or end with a hyphen.

                                          + *

                                          + * NONE - Doesn't compress the data on the volume.

                                          *
                                        • *
                                        • - *

                                          Can start with a numeric.

                                          + *

                                          + * ZSTD - Compresses the data in the volume using the Zstandard + * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on + * your volume and has very little impact on compute resources.

                                          *
                                        • *
                                        - *

                                        For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless of how you specify them: - * as uppercase letters, lowercase letters, or the corresponding letters in escape codes.

                                        */ - Aliases: string[] | undefined; -} + DataCompressionType?: OpenZFSDataCompressionType | string; -export namespace AssociateFileSystemAliasesRequest { /** - * @internal + *

+  /**
+   * A Boolean value indicating whether tags for the volume should be copied to snapshots.
+   * This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots
+   * where the user doesn't specify tags. If this value is true and you specify one or more tags, only
+   * the specified tags are copied to snapshots. If you specify one or more tags when creating the
+   * snapshot, no tags are copied from the volume, regardless of this value.
+   */
+  CopyTagsToSnapshots?: boolean;
+
+  /**
+   * The configuration object that specifies the snapshot to use as the origin of the data for the volume.
+   */
+  OriginSnapshot?: OpenZFSOriginSnapshotConfiguration;
+
+  /**
+   * A Boolean value indicating whether the volume is read-only.
+   */
+  ReadOnly?: boolean;
+
+  /**
+   * The configuration object for mounting a Network File System (NFS) file system.
+   */
+  NfsExports?: OpenZFSNfsExport[];
+
+  /**
+   * An object specifying how much storage users or groups can use on the volume.
+   */
+  UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[];
+}
+
+export namespace OpenZFSVolumeConfiguration {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: OpenZFSVolumeConfiguration): any => ({
+    ...obj,
+  });
+}
+
+export enum VolumeType {
+  ONTAP = "ONTAP",
+  OPENZFS = "OPENZFS",
+}
+
-export namespace AssociateFileSystemAliasesRequest {
-  /**
-   * @internal
-   */
-  export const filterSensitiveLog = (obj: AssociateFileSystemAliasesRequest): any => ({
-    ...obj,
-  });
-}
-
-/**
- * The system generated response showing the DNS aliases that Amazon FSx is attempting to associate with
- * the file system. Use the API operation to monitor the status of the aliases Amazon FSx is associating
- * with the file system. It can take up to 2.5 minutes for the alias status to change from CREATING to AVAILABLE.
- */
-export interface AssociateFileSystemAliasesResponse {
-  /**
-   * An array of the DNS aliases that Amazon FSx is associating with the file system.
+/**
+ * The request object specifying one or more DNS alias names to associate with an Amazon FSx for
+ * Windows File Server file system.
+ */
+export interface AssociateFileSystemAliasesRequest {
+  /**
+   * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII characters.
+   * This token is automatically filled on your behalf when you use the Command Line Interface (CLI)
+   * or an Amazon Web Services SDK.
+   */
+  ClientRequestToken?: string;
+
+  /**
+   * Specifies the file system with which you want to associate one or more DNS aliases.
+   */
+  FileSystemId: string | undefined;
+
+  /**
+   * An array of one or more DNS alias names to associate with the file system. The alias name has to
+   * comply with the following formatting requirements:
+   *   - Formatted as a fully-qualified domain name (FQDN), hostname.domain, for example,
+   *     accounting.corp.example.com.
+   *   - Can contain alphanumeric characters and the hyphen (-).
+   *   - Cannot start or end with a hyphen.
+   *   - Can start with a numeric.
+   * For DNS alias names, Amazon FSx stores alphabetic characters as lowercase letters (a-z), regardless
+   * of how you specify them: as uppercase letters, lowercase letters, or the corresponding letters in
+   * escape codes.
+   */
+  Aliases: string[] | undefined;
+}
+
+export namespace AssociateFileSystemAliasesRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: AssociateFileSystemAliasesRequest): any => ({
+    ...obj,
+  });
+}
+
+/**
+ * The system generated response showing the DNS aliases that Amazon FSx is attempting to associate with
+ * the file system. Use the API operation to monitor the status of the aliases Amazon FSx is associating
+ * with the file system. It can take up to 2.5 minutes for the alias status to change from CREATING to AVAILABLE.
+ */
+export interface AssociateFileSystemAliasesResponse {
+  /**
+   * An array of the DNS aliases that Amazon FSx is associating with the file system.
    */
   Aliases?: Alias[];
 }
@@ -1418,6 +1773,100 @@ export namespace InternalServerError {
   });
 }
 
+export enum EventType {
+  CHANGED = "CHANGED",
+  DELETED = "DELETED",
+  NEW = "NEW",
+}
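For orientation, the relocated request and response shapes above correspond to the AssociateFileSystemAliases operation. A minimal usage sketch, assuming the usual FSxClient and AssociateFileSystemAliasesCommand exports from @aws-sdk/client-fsx and a hypothetical file system ID:

import { FSxClient, AssociateFileSystemAliasesCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

export async function addAlias(fileSystemId: string) {
  // Aliases must be FQDNs such as accounting.corp.example.com; FSx stores them lowercased.
  const response = await client.send(
    new AssociateFileSystemAliasesCommand({
      FileSystemId: fileSystemId, // e.g. "fs-0123456789abcdef0" (hypothetical)
      Aliases: ["accounting.corp.example.com"],
    })
  );
  // Association is asynchronous: each returned alias carries a lifecycle that moves
  // from CREATING to AVAILABLE (which can take up to about 2.5 minutes).
  return response.Aliases;
}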

+/**
+ * Describes a data repository association's automatic export policy. The AutoExportPolicy defines the
+ * types of updated objects on the file system that will be automatically exported to the data
+ * repository. As you create, modify, or delete files, Amazon FSx automatically exports the defined
+ * changes asynchronously once your application finishes modifying the file.
+ *
+ * This AutoExportPolicy is supported only for file systems with the Persistent_2 deployment type.
+ */
+export interface AutoExportPolicy {
+  /**
+   * The AutoExportPolicy can have the following event values:
+   *   - NEW - Amazon FSx automatically exports new files and directories to the data repository as
+   *     they are added to the file system.
+   *   - CHANGED - Amazon FSx automatically exports changes to files and directories on the file
+   *     system to the data repository.
+   *   - DELETED - Files and directories are automatically deleted on the data repository when they
+   *     are deleted on the file system.
+   * You can define any combination of event types for your AutoExportPolicy.
+   */
+  Events?: (EventType | string)[];
+}
+
+export namespace AutoExportPolicy {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: AutoExportPolicy): any => ({
+    ...obj,
+  });
+}
+
+/**
+ * Describes the data repository association's automatic import policy. The AutoImportPolicy defines
+ * how Amazon FSx keeps your file metadata and directory listings up to date by importing changes to
+ * your file system as you modify objects in a linked S3 bucket.
+ *
+ * This AutoImportPolicy is supported only for file systems with the Persistent_2 deployment type.
+ */
+export interface AutoImportPolicy {
+  /**
+   * The AutoImportPolicy can have the following event values:
+   *   - NEW - Amazon FSx automatically imports metadata of files added to the linked S3 bucket that
+   *     do not currently exist in the FSx file system.
+   *   - CHANGED - Amazon FSx automatically updates file metadata and invalidates existing file
+   *     content on the file system as files change in the data repository.
+   *   - DELETED - Amazon FSx automatically deletes files on the file system as corresponding files
+   *     are deleted in the data repository.
+   * You can define any combination of event types for your AutoImportPolicy.
+   */
+  Events?: (EventType | string)[];
+}
+
+export namespace AutoImportPolicy {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: AutoImportPolicy): any => ({
+    ...obj,
+  });
+}
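Both policy shapes carry only an Events array drawn from the EventType enum above. A minimal sketch of populating them, assuming these model types are re-exported from the package root as is typical for these clients:

import { AutoExportPolicy, AutoImportPolicy, EventType } from "@aws-sdk/client-fsx";

// Mirror new, changed, and deleted files out to S3, but only pick up new objects on import.
const autoExportPolicy: AutoExportPolicy = {
  Events: [EventType.NEW, EventType.CHANGED, EventType.DELETED],
};

const autoImportPolicy: AutoImportPolicy = {
  Events: [EventType.NEW],
};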

 /**
  * Cancels a data repository task.
  */
@@ -1587,38 +2036,37 @@ export interface CopyBackupRequest {
   ClientRequestToken?: string;
 
   /**
-   * The ID of the source backup. Specifies the ID of the backup that is
-   * being copied.
+   * The ID of the source backup. Specifies the ID of the backup that's being copied.
    */
   SourceBackupId: string | undefined;
 
   /**
-   * The source Amazon Web Services Region of the backup. Specifies the Amazon Web Services Region from which
-   * the backup is being copied. The source and destination Regions must be in the same
-   * Amazon Web Services partition. If you don't specify a Region, it defaults to the Region where the
-   * request is sent from (in-Region copy).
+   * The source Amazon Web Services Region of the backup. Specifies the Amazon Web Services Region from
+   * which the backup is being copied. The source and destination Regions must be in the same
+   * Amazon Web Services partition. If you don't specify a Region, SourceRegion defaults to the Region
+   * where the request is sent from (in-Region copy).
    */
   SourceRegion?: string;
 
   /**
-   * The ID of the Key Management Service (KMS) key used to encrypt the file system's data for
-   * Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and
-   * Amazon FSx for Lustre PERSISTENT_1 file systems at rest. If not specified, the Amazon FSx managed
-   * key is used. The Amazon FSx for Lustre SCRATCH_1 and SCRATCH_2 file systems are always encrypted
-   * at rest using Amazon FSx managed keys. For more information, see Encrypt
+   * The ID of the Key Management Service (KMS) key used to encrypt the file system's data for
+   * Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and
+   * Amazon FSx for Lustre PERSISTENT_1 and PERSISTENT_2 file systems at rest. If this ID isn't
+   * specified, the key managed by Amazon FSx is used. The Amazon FSx for Lustre SCRATCH_1 and
+   * SCRATCH_2 file systems are always encrypted at rest using Amazon FSx-managed keys. For more
+   * information, see Encrypt
    * in the Key Management Service API Reference.
    */
   KmsKeyId?: string;
 
   /**
-   * A boolean flag indicating whether tags from the source backup should be copied to the backup copy.
-   * This value defaults to false.
-   *
-   * If you set CopyTags to true and the source backup has existing tags, you can use the Tags
-   * parameter to create new tags, provided that the sum of the source backup tags and the new tags
-   * doesn't exceed 50. Both sets of tags are merged. If there are tag conflicts (for example, two tags
-   * with the same key but different values), the tags created with the Tags parameter take precedence.
+   * A Boolean flag indicating whether tags from the source backup should be copied to the backup copy.
+   * This value defaults to false.
+   *
+   * If you set CopyTags to true and the source backup has existing tags, you can use the Tags
+   * parameter to create new tags, provided that the sum of the source backup tags and the new tags
+   * doesn't exceed 50. Both sets of tags are merged. If there are tag conflicts (for example, two tags
+   * with the same key but different values), the tags created with the Tags parameter take precedence.
    */
   CopyTags?: boolean;
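The reworded CopyBackupRequest fields above map directly onto the CopyBackupCommand in @aws-sdk/client-fsx. A minimal cross-Region sketch, assuming the response exposes the copied backup under Backup as in the published client, with hypothetical IDs:

import { FSxClient, CopyBackupCommand } from "@aws-sdk/client-fsx";

// The copy is created in the client's Region; SourceRegion names where the backup lives today.
const client = new FSxClient({ region: "us-west-2" });

export async function copyBackupCrossRegion(sourceBackupId: string) {
  const { Backup } = await client.send(
    new CopyBackupCommand({
      SourceBackupId: sourceBackupId, // e.g. "backup-0123456789abcdef0" (hypothetical)
      SourceRegion: "us-east-1",      // omit for an in-Region copy
      CopyTags: true,                 // merge source-backup tags into the copy
    })
  );
  return Backup;
}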

@@ -1643,7 +2091,7 @@ export namespace CopyBackupRequest {
  */
 export interface BackupFailureDetails {
   /**
-   * A message describing the backup creation failure.
+   * A message describing the backup-creation failure.
    */
   Message?: string;
 }
@@ -1707,9 +2155,8 @@ export namespace IncompatibleParameterError {
 }
 
 /**
- * Amazon FSx doesn't support Multi-AZ Windows File Server copy backup in the destination Region,
- * so the copied backup can't be restored.
+ * Amazon FSx doesn't support Multi-AZ Windows File Server copy backup in the
+ * destination Region, so the copied backup can't be restored.
  */
 export interface IncompatibleRegionForMultiAZ extends __SmithyException, $MetadataBearer {
   name: "IncompatibleRegionForMultiAZ";
@@ -1730,8 +2177,8 @@ export namespace IncompatibleRegionForMultiAZ {
 }
 
 /**
- * The Key Management Service (KMS) key of the destination backup is invalid.
+ * The Key Management Service (KMS) key of the destination backup is not valid.
  */
 export interface InvalidDestinationKmsKey extends __SmithyException, $MetadataBearer {
   name: "InvalidDestinationKmsKey";
@@ -1752,8 +2199,8 @@ export namespace InvalidDestinationKmsKey {
 }
 
 /**
- * The Region provided for Source Region is invalid or is in a different Amazon Web Services partition.
+ * The Region provided for SourceRegion is not valid or is in a different
+ * Amazon Web Services partition.
 */
 export interface InvalidRegion extends __SmithyException, $MetadataBearer {
   name: "InvalidRegion";
@@ -1774,8 +2221,8 @@ export namespace InvalidRegion {
 }
 
 /**
- * The Key Management Service (KMS) key of the source backup is invalid.
+ * The Key Management Service (KMS) key of the source backup is not valid.
 */
 export interface InvalidSourceKmsKey extends __SmithyException, $MetadataBearer {
   name: "InvalidSourceKmsKey";
@@ -1835,8 +2282,8 @@ export namespace ServiceLimitExceeded {
 }
 
 /**
- * The request was rejected because the lifecycle status of the source backup is not AVAILABLE.
+ * The request was rejected because the lifecycle status of the source backup isn't
+ * AVAILABLE.
 */
 export interface SourceBackupUnavailable extends __SmithyException, $MetadataBearer {
   name: "SourceBackupUnavailable";
@@ -1847,7 +2294,7 @@ export interface SourceBackupUnavailable extends __SmithyException, $MetadataBearer {
   Message?: string;
 
   /**
-   * The ID of the source backup. Specifies the backup you are copying.
+   * The ID of the source backup. Specifies the backup that you are copying.
   */
   BackupId?: string;
 }
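The exception shapes above surface at runtime as thrown errors whose name matches the model name, which is the usual pattern for these clients. A hedged error-handling sketch around the copy call:

import { FSxClient, CopyBackupCommand } from "@aws-sdk/client-fsx";

export async function copyWhenAvailable(client: FSxClient, sourceBackupId: string) {
  try {
    return await client.send(new CopyBackupCommand({ SourceBackupId: sourceBackupId }));
  } catch (error) {
    const name = (error as { name?: string }).name;
    if (name === "SourceBackupUnavailable") {
      // The source backup's lifecycle isn't AVAILABLE yet; a caller could retry later.
      return undefined;
    }
    if (name === "InvalidSourceKmsKey" || name === "InvalidDestinationKmsKey") {
      // KMS key problems are not retryable; surface them with context.
      throw new Error(`KMS key rejected while copying ${sourceBackupId}: ${name}`);
    }
    throw error;
  }
}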

@@ -1893,21 +2340,23 @@ export interface CreateBackupRequest {
   FileSystemId?: string;
 
   /**
-   * (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure
-   * idempotent creation. This string is automatically filled on your behalf when you use the
-   * Command Line Interface (CLI) or an Amazon Web Services SDK.
+   * (Optional) A string of up to 64 ASCII characters that Amazon FSx uses to
+   * ensure idempotent creation. This string is automatically filled on your behalf when you
+   * use the Command Line Interface (CLI) or an Amazon Web Services SDK.
    */
   ClientRequestToken?: string;
 
   /**
    * (Optional) The tags to apply to the backup at backup creation. The key value of the
-   * Name tag appears in the console as the backup name. If you have set CopyTagsToBackups to true, and
-   * you specify one or more tags using the CreateBackup action, no existing file system tags are copied from the file system to the backup.
+   * Name tag appears in the console as the backup name. If you have set
+   * CopyTagsToBackups to true, and you specify one or more
+   * tags using the CreateBackup operation, no existing file system tags are
+   * copied from the file system to the backup.
    */
   Tags?: Tag[];
 
   /**
-   * The ID of he FSx for NetApp ONTAP volume to back up.
+   * (Optional) The ID of the FSx for ONTAP volume to back up.
    */
   VolumeId?: string;
 }
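With the optional VolumeId clarified above, a minimal sketch of backing up an FSx for ONTAP volume via CreateBackupCommand from @aws-sdk/client-fsx; the volume ID and tag values are placeholders:

import { FSxClient, CreateBackupCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

export async function backUpOntapVolume(volumeId: string) {
  // For FSx for ONTAP the backup targets a volume, so VolumeId is set and FileSystemId is omitted.
  return client.send(
    new CreateBackupCommand({
      VolumeId: volumeId, // e.g. "fsvol-0123456789abcdef0" (hypothetical)
      Tags: [{ Key: "Name", Value: "nightly-volume-backup" }],
    })
  );
}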

@@ -1942,92 +2391,98 @@ export namespace VolumeNotFound {
   });
 }
 
-export enum ReportFormat {
-  REPORT_CSV_20191124 = "REPORT_CSV_20191124",
-}
-
-export enum ReportScope {
-  FAILED_FILES_ONLY = "FAILED_FILES_ONLY",
-}
-
-export interface CompletionReport {
-  Enabled: boolean | undefined;
-  Path?: string;
-  Format?: ReportFormat | string;
-  Scope?: ReportScope | string;
-}
-
-export namespace CompletionReport {
-  export const filterSensitiveLog = (obj: CompletionReport): any => ({
-    ...obj,
-  });
-}
-
-export enum DataRepositoryTaskType {
-  EXPORT = "EXPORT_TO_REPOSITORY",
-}
-
-export interface CreateDataRepositoryTaskRequest {
-  Type: DataRepositoryTaskType | string | undefined;
-  Paths?: string[];
-  FileSystemId: string | undefined;
-  Report: CompletionReport | undefined;
-
+/**
+ * The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with
+ * a data repository association. The configuration consists of an AutoImportPolicy that defines which
+ * file events on the data repository are automatically imported to the file system and an
+ * AutoExportPolicy that defines which file events on the file system are automatically exported to
+ * the data repository. File events are when files or directories are added, changed, or deleted on
+ * the file system or the data repository.
+ */
+export interface S3DataRepositoryConfiguration {
+  /**
+   * Specifies the type of updated objects (new, changed, deleted) that will be automatically imported
+   * from the linked S3 bucket to your file system.
+   */
+  AutoImportPolicy?: AutoImportPolicy;
+
+  /**
+   * Specifies the type of updated objects (new, changed, deleted) that will be automatically exported
+   * from your file system to the linked S3 bucket.
+   */
+  AutoExportPolicy?: AutoExportPolicy;
+}
+
+export namespace S3DataRepositoryConfiguration {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: S3DataRepositoryConfiguration): any => ({
+    ...obj,
+  });
+}
+
+export interface CreateDataRepositoryAssociationRequest {
+  /**
+   * The globally unique ID of the file system, assigned by Amazon FSx.
+   */
+  FileSystemId: string | undefined;
+
+  /**
+   * A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory
+   * (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash
+   * in the name is required. Two data repository associations cannot have overlapping file system
+   * paths. For example, if a data repository is associated with file system path /ns1/, then you
+   * cannot link another data repository with file system path /ns1/ns2.
+   *
+   * This path specifies where in your file system files will be exported from or imported to. This
+   * file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be
+   * linked to the directory.
+   */
+  FileSystemPath: string | undefined;
+
+  /**
+   * The path to the Amazon S3 data repository that will be linked to the file system. The path can be
+   * an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3
+   * data repository files will be imported from or exported to.
+   */
+  DataRepositoryPath: string | undefined;
+
+  /**
+   * Set to true to run an import data repository task to import metadata from the data repository to
+   * the file system after the data repository association is created. Default is false.
+   */
+  BatchImportMetaDataOnCreate?: boolean;
+
+  /**
+   * For files imported from a data repository, this value determines the stripe count and maximum
+   * amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks
+   * that a single file can be striped across is limited by the total number of disks that make up
+   * the file system.
+   *
+   * The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB).
+   * Amazon S3 objects have a maximum size of 5 TB.
+   */
+  ImportedFileChunkSize?: number;
+
+  /**
+   * The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system
+   * with a data repository association. The configuration defines which file events (new, changed,
+   * or deleted files or directories) are automatically imported from the linked data repository to
+   * the file system or automatically exported from the file system to the data repository.
+   */
+  S3?: S3DataRepositoryConfiguration;

   /**
    * (Optional) An idempotency token for resource creation, in a string of up to 64
    * ASCII characters. This token is automatically filled on your behalf when you use the
    * Command Line Interface (CLI) or an Amazon Web Services SDK.
    */
   ClientRequestToken?: string;
@@ -2042,60 +2497,348 @@ export interface CreateDataRepositoryTaskRequest {
   Tags?: Tag[];
 }
 
-export namespace CreateDataRepositoryTaskRequest {
+export namespace CreateDataRepositoryAssociationRequest {
   /**
    * @internal
    */
-  export const filterSensitiveLog = (obj: CreateDataRepositoryTaskRequest): any => ({
+  export const filterSensitiveLog = (obj: CreateDataRepositoryAssociationRequest): any => ({
     ...obj,
   });
 }
-
-export interface DataRepositoryTaskFailureDetails {
-  Message?: string;
-}
-
-export namespace DataRepositoryTaskFailureDetails {
-  export const filterSensitiveLog = (obj: DataRepositoryTaskFailureDetails): any => ({
-    ...obj,
-  });
-}

+/**
+ * The configuration of a data repository association that links an Amazon FSx for Lustre file system
+ * to an Amazon S3 bucket. The data repository association configuration object is returned in the
+ * response of the following operations:
+ *   - CreateDataRepositoryAssociation
+ *   - UpdateDataRepositoryAssociation
+ *   - DescribeDataRepositoryAssociations
+ * Data repository associations are supported only for file systems with the Persistent_2 deployment type.
+ */
+export interface DataRepositoryAssociation {
+  /**
+   * The system-generated, unique ID of the data repository association.
+   */
+  AssociationId?: string;
+
+  /**
+   * The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services
+   * resources. We require an ARN when you need to specify a resource unambiguously across all of
+   * Amazon Web Services. For more information, see Amazon Resource Names (ARNs) in the
+   * Amazon Web Services General Reference.
+   */
+  ResourceARN?: string;
+
+  /**
+   * The globally unique ID of the file system, assigned by Amazon FSx.
+   */
+  FileSystemId?: string;
+
+  /**
+   * Describes the state of a data repository association. The lifecycle can have the following values:
+   *   - CREATING - The data repository association between the FSx file system and the S3 data
+   *     repository is being created. The data repository is unavailable.
+   *   - AVAILABLE - The data repository association is available for use.
+   *   - MISCONFIGURED - Amazon FSx cannot automatically import updates from the S3 bucket or
+   *     automatically export updates to the S3 bucket until the data repository association
+   *     configuration is corrected.
+   *   - UPDATING - The data repository association is undergoing a customer initiated update that
+   *     might affect its availability.
+   *   - DELETING - The data repository association is undergoing a customer initiated deletion.
+   *   - FAILED - The data repository association is in a terminal state that cannot be recovered.
+   */
+  Lifecycle?: DataRepositoryLifecycle | string;
+
+  /**
+   * Provides detailed information about the data repository if its Lifecycle is set to
+   * MISCONFIGURED or FAILED.
+   */
+  FailureDetails?: DataRepositoryFailureDetails;
+
+  /**
+   * A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory
+   * (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash
+   * in the name is required. Two data repository associations cannot have overlapping file system
+   * paths. For example, if a data repository is associated with file system path /ns1/, then you
+   * cannot link another data repository with file system path /ns1/ns2.
+   *
+   * This path specifies where in your file system files will be exported from or imported to. This
+   * file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be
+   * linked to the directory.
+   */
+  FileSystemPath?: string;
+
+  /**
+   * The path to the Amazon S3 data repository that will be linked to the file system. The path can be
+   * an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3
+   * data repository files will be imported from or exported to.
+   */
+  DataRepositoryPath?: string;
+
+  /**
+   * A boolean flag indicating whether an import data repository task to import metadata should run
+   * after the data repository association is created. The task runs if this flag is set to true.
+   */
+  BatchImportMetaDataOnCreate?: boolean;
+
+  /**
+   * For files imported from a data repository, this value determines the stripe count and maximum
+   * amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks
+   * that a single file can be striped across is limited by the total number of disks that make up
+   * the file system.
+   *
+   * The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB).
+   * Amazon S3 objects have a maximum size of 5 TB.
+   */
+  ImportedFileChunkSize?: number;
+
+  /**
+   * The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system
+   * with a data repository association. The configuration defines which file events (new, changed,
+   * or deleted files or directories) are automatically imported from the linked data repository to
+   * the file system or automatically exported from the file system to the data repository.
+   */
+  S3?: S3DataRepositoryConfiguration;
+
+  /**
+   * A list of Tag values, with a maximum of 50 elements.
+   */
+  Tags?: Tag[];
+
+  /**
+   * The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), also known as Unix time.
+   */
+  CreationTime?: Date;
+}
+
+export namespace DataRepositoryAssociation {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DataRepositoryAssociation): any => ({
+    ...obj,
+  });
+}
+
+export interface CreateDataRepositoryAssociationResponse {
+  /**
+   * The response object returned after the data repository association is created.
+   */
+  Association?: DataRepositoryAssociation;
+}
+
+export namespace CreateDataRepositoryAssociationResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: CreateDataRepositoryAssociationResponse): any => ({
+    ...obj,
+  });
+}
+
+export enum ReportFormat {
+  REPORT_CSV_20191124 = "REPORT_CSV_20191124",
+}
+
+export enum ReportScope {
+  FAILED_FILES_ONLY = "FAILED_FILES_ONLY",
+}
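The request, association, and response shapes above back the new CreateDataRepositoryAssociation operation. A hedged sketch of linking a Lustre file system path to an S3 prefix, assuming the CreateDataRepositoryAssociationCommand export from @aws-sdk/client-fsx and placeholder bucket and path names:

import { FSxClient, CreateDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

export async function linkS3Repository(fileSystemId: string) {
  const { Association } = await client.send(
    new CreateDataRepositoryAssociationCommand({
      FileSystemId: fileSystemId,
      FileSystemPath: "/ns1/",                          // must not overlap another association
      DataRepositoryPath: "s3://my-bucket/my-prefix/",  // hypothetical bucket/prefix
      BatchImportMetaDataOnCreate: true,                // import existing S3 metadata right away
      S3: {
        AutoImportPolicy: { Events: ["NEW", "CHANGED", "DELETED"] },
        AutoExportPolicy: { Events: ["NEW", "CHANGED", "DELETED"] },
      },
    })
  );
  // The association starts in CREATING and should move to AVAILABLE once usable.
  return Association?.AssociationId;
}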

+/**
+ * Provides a report detailing the data repository task results of the files processed that match the
+ * criteria specified in the report Scope parameter. FSx delivers the report to the file system's
+ * linked data repository in Amazon S3, using the path specified in the report Path parameter. You can
+ * specify whether or not a report gets generated for a task using the Enabled parameter.
+ */
+export interface CompletionReport {
+  /**
+   * Set Enabled to True to generate a CompletionReport when the task completes. If set to true, then
+   * you need to provide a report Scope, Path, and Format. Set Enabled to False if you do not want a
+   * CompletionReport generated when the task completes.
+   */
+  Enabled: boolean | undefined;
+
+  /**
+   * Required if Enabled is set to true. Specifies the location of the report on the file system's
+   * linked S3 data repository. An absolute path that defines where the completion report will be
+   * stored in the destination location. The Path you provide must be located within the file system's
+   * ExportPath. An example Path value is "s3://myBucket/myExportPath/optionalPrefix". The report
+   * provides the following information for each file in the report: FilePath, FileStatus, and
+   * ErrorCode. To learn more about a file system's ExportPath, see .
+   */
+  Path?: string;
+
+  /**
+   * Required if Enabled is set to true. Specifies the format of the CompletionReport.
+   * REPORT_CSV_20191124 is the only format currently supported. When Format is set to
+   * REPORT_CSV_20191124, the CompletionReport is provided in CSV format, and is delivered to
+   * {path}/task-{id}/failures.csv.
+   */
+  Format?: ReportFormat | string;
+
+  /**
+   * Required if Enabled is set to true. Specifies the scope of the CompletionReport;
+   * FAILED_FILES_ONLY is the only scope currently supported. When Scope is set to FAILED_FILES_ONLY,
+   * the CompletionReport only contains information about files that the data repository task failed
+   * to process.
+   */
+  Scope?: ReportScope | string;
+}
+
+export namespace CompletionReport {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: CompletionReport): any => ({
+    ...obj,
+  });
+}
+
+export enum DataRepositoryTaskType {
+  EXPORT = "EXPORT_TO_REPOSITORY",
+  IMPORT = "IMPORT_METADATA_FROM_REPOSITORY",
+}
+
+export interface CreateDataRepositoryTaskRequest {
+  /**
+   * Specifies the type of data repository task to create.
+   */
+  Type: DataRepositoryTaskType | string | undefined;
+
+  /**
+   * (Optional) The path or paths on the Amazon FSx file system to use when the data repository task
+   * is processed. The default path is the file system root directory. The paths you provide need to
+   * be relative to the mount point of the file system. If the mount point is /mnt/fsx and
+   * /mnt/fsx/path1 is a directory or file on the file system you want to export, then the path to
+   * provide is path1. If a path that you provide isn't valid, the task fails.
+   */
+  Paths?: string[];
+
+  /**
+   * The globally unique ID of the file system, assigned by Amazon FSx.
+   */
+  FileSystemId: string | undefined;
+
+  /**
+   * Defines whether or not Amazon FSx provides a CompletionReport once the task has completed. A
+   * CompletionReport provides a detailed report on the files that Amazon FSx processed that meet the
+   * criteria specified by the Scope parameter. For more information, see
+   * Working with Task Completion Reports.
+   */
+  Report: CompletionReport | undefined;
+
+  /**
+   * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII characters.
+   * This token is automatically filled on your behalf when you use the Command Line Interface (CLI)
+   * or an Amazon Web Services SDK.
+   */
+  ClientRequestToken?: string;
+
+  /**
+   * A list of Tag values, with a maximum of 50 elements.
+   */
+  Tags?: Tag[];
+}
+
+export namespace CreateDataRepositoryTaskRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: CreateDataRepositoryTaskRequest): any => ({
+    ...obj,
+  });
+}
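The new IMPORT_METADATA_FROM_REPOSITORY task type above can be exercised through CreateDataRepositoryTaskCommand. A minimal sketch, assuming that command export from @aws-sdk/client-fsx and a hypothetical file system path:

import { FSxClient, CreateDataRepositoryTaskCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

export async function importRepositoryMetadata(fileSystemId: string) {
  // IMPORT_METADATA_FROM_REPOSITORY pulls metadata changes in from the linked S3 bucket;
  // EXPORT_TO_REPOSITORY pushes file system changes out instead.
  return client.send(
    new CreateDataRepositoryTaskCommand({
      FileSystemId: fileSystemId,
      Type: "IMPORT_METADATA_FROM_REPOSITORY",
      Paths: ["ns1/subdir"],       // relative to the file system mount point
      Report: { Enabled: false },  // no completion report for this run
    })
  );
}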

+/**
+ * Provides information about why a data repository task failed. Only populated when the task
+ * Lifecycle is set to FAILED.
+ */
+export interface DataRepositoryTaskFailureDetails {
+  /**
+   * A detailed error message.
+   */
+  Message?: string;
+}
+
+export namespace DataRepositoryTaskFailureDetails {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DataRepositoryTaskFailureDetails): any => ({
+    ...obj,
+  });
+}
+
+/**
+ * Provides the task status showing a running total of the total number of files to be processed,
+ * the number successfully processed, and the number of files the task failed to process.
+ */
+export interface DataRepositoryTaskStatus {
+  /**
+   * The total number of files that the task will process. While a task is executing, the sum of
+   * SucceededCount plus FailedCount may not equal TotalCount. When the task is complete, TotalCount
+   * equals the sum of SucceededCount plus FailedCount.
+   */
+  TotalCount?: number;
+
+  /**
+   * A running total of the number of files that the task has successfully processed.
+   */
+  SucceededCount?: number;
+
+  /**
+   * A running total of the number of files that the task failed to process.
+   */
+  FailedCount?: number;
+
+  /**
+   * The time at which the task status was last updated.
+   */
+  LastUpdatedTime?: Date;
 }
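The counters in DataRepositoryTaskStatus are what a caller would poll while a task runs. A hedged monitoring sketch, assuming the DescribeDataRepositoryTasksCommand export from @aws-sdk/client-fsx:

import { FSxClient, DescribeDataRepositoryTasksCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

export async function reportTaskProgress(taskId: string) {
  const { DataRepositoryTasks } = await client.send(
    new DescribeDataRepositoryTasksCommand({ TaskIds: [taskId] })
  );
  const status = DataRepositoryTasks?.[0]?.Status;
  if (status) {
    // While the task is executing, SucceededCount + FailedCount can trail TotalCount.
    console.log(
      `${status.SucceededCount ?? 0} succeeded, ${status.FailedCount ?? 0} failed, ` +
        `${status.TotalCount ?? 0} total (updated ${status.LastUpdatedTime})`
    );
  }
}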

@@ -2109,7 +2852,7 @@ export namespace DataRepositoryTaskStatus {
 /**
  * A description of the data repository task. You use data repository tasks
- * to perform bulk transfer operations between your Amazon FSx file system and its linked data
+ * to perform bulk transfer operations between your Amazon FSx file system and a linked data
  * repository.
  */
 export interface DataRepositoryTask {
@@ -2157,7 +2900,17 @@ export interface DataRepositoryTask {
   Lifecycle: DataRepositoryTaskLifecycle | string | undefined;
 
   /**
-   * The type of data repository task; EXPORT_TO_REPOSITORY is the only type currently supported.
+   * The type of data repository task.
+   *   - The EXPORT_TO_REPOSITORY data repository task exports from your Lustre file system to a
+   *     linked S3 bucket.
+   *   - The IMPORT_METADATA_FROM_REPOSITORY data repository task imports metadata changes from a
+   *     linked S3 bucket to your Lustre file system.
    */
   Type: DataRepositoryTaskType | string | undefined;

@@ -2269,43 +3022,162 @@ export namespace DataRepositoryTaskExecuting {
 }
 
 /**
- * The Lustre configuration for the file system being created.
+ * The Lustre logging configuration used when creating or updating an Amazon FSx for Lustre file
+ * system. Lustre logging writes the enabled logging events for your file system to
+ * Amazon CloudWatch Logs.
+ *
+ * Error and warning events can be logged from the following data repository operations:
+ *   - Automatic export
+ *   - Data repository tasks
+ * To learn more about Lustre logging, see Logging to Amazon CloudWatch Logs.
  */
-export interface CreateFileSystemLustreConfiguration {
-  /**
-   * (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC
-   * time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
-   */
-  WeeklyMaintenanceStartTime?: string;
-
-  /**
-   * (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as
-   * the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre
-   * file system will be mapped to the root of the Amazon S3 bucket you select. An example is
-   * s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only
-   * object keys with that prefix are loaded into the file system.
-   */
+export interface LustreLogCreateConfiguration {
+  /**
+   * Sets which data repository events are logged by Amazon FSx.
+   *   - WARN_ONLY - only warning events are logged.
+   *   - ERROR_ONLY - only error events are logged.
+   *   - WARN_ERROR - both warning events and error events are logged.
+   *   - DISABLED - logging of data repository events is turned off.
+   */
+  Level: LustreAccessAuditLogLevel | string | undefined;
+
+  /**
+   * The Amazon Resource Name (ARN) that specifies the destination of the logs.
+   *
+   * The destination can be any Amazon CloudWatch Logs log group ARN, with the following requirements:
+   *   - The destination ARN that you provide must be in the same Amazon Web Services partition,
+   *     Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.
+   *   - The name of the Amazon CloudWatch Logs log group must begin with the /aws/fsx prefix.
+   *   - If you do not provide a destination, Amazon FSx will create and use a log stream in the
+   *     CloudWatch Logs /aws/fsx/lustre log group.
+   *   - If Destination is provided and the resource does not exist, the request will fail with a
+   *     BadRequest error.
+   *   - If Level is set to DISABLED, you cannot specify a destination in Destination.
+   */
+  Destination?: string;
+}
+
+export namespace LustreLogCreateConfiguration {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: LustreLogCreateConfiguration): any => ({
+    ...obj,
+  });
+}
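Tying the logging shape above to the deployment-type discussion that follows, here is a hedged sketch of creating a PERSISTENT_2 Lustre file system with CloudWatch logging. It assumes the CreateFileSystemCommand export from @aws-sdk/client-fsx, that the Lustre configuration exposes these logging settings under a LogConfiguration member (not shown in this excerpt), and placeholder account, subnet, and log-group values:

import { FSxClient, CreateFileSystemCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

export async function createPersistent2Lustre(subnetId: string) {
  return client.send(
    new CreateFileSystemCommand({
      FileSystemType: "LUSTRE",
      // PERSISTENT_2 is rejected when FileSystemTypeVersion is 2.10 (see the note below),
      // so a newer Lustre version is requested explicitly.
      FileSystemTypeVersion: "2.12",
      StorageCapacity: 1200,
      SubnetIds: [subnetId],
      LustreConfiguration: {
        DeploymentType: "PERSISTENT_2",
        PerUnitStorageThroughput: 125, // MB/s/TiB
        // Assumed member name for LustreLogCreateConfiguration: send WARN and ERROR
        // events to a log group under the required /aws/fsx prefix.
        LogConfiguration: {
          Level: "WARN_ERROR",
          Destination: "arn:aws:logs:us-east-1:111122223333:log-group:/aws/fsx/lustre-demo",
        },
      },
    })
  );
}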

+/**
+ * The Lustre configuration for the file system being created.
+ *
+ * The following parameters are not supported for file systems with the Persistent_2 deployment type.
+ * Instead, use CreateDataRepositoryAssociation to create a data repository association to link your
+ * Lustre file system to a data repository.
+ *   - AutoImportPolicy
+ *   - ExportPath
+ *   - ImportedChunkSize
+ *   - ImportPath
+ */
+export interface CreateFileSystemLustreConfiguration {
+  /**
+   * (Optional) The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC
+   * time zone, where d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.
+   */
+  WeeklyMaintenanceStartTime?: string;
+
+  /**
+   * (Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as
+   * the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre
+   * file system will be mapped to the root of the Amazon S3 bucket you select. An example is
+   * s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only
+   * object keys with that prefix are loaded into the file system.
+   *
+   * This parameter is not supported for file systems with the Persistent_2 deployment type. Instead,
+   * use CreateDataRepositoryAssociation to create a data repository association to link your Lustre
+   * file system to a data repository.
+   */
   ImportPath?: string;
 
   /**
-   * (Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported.
+   * (Optional) Available with Scratch and Persistent_1 deployment types.
+   * Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported.
+   * The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an
+   * optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre
+   * file system. If an ExportPath value is not provided, Amazon FSx sets a default export path,
+   * s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example
+   * s3://import-bucket/FSxLustre20181105T222312Z.
+   *
    * The Amazon S3 export bucket must be the same as the import bucket specified by
-   * ImportPath. If you only specify a bucket name, such as
+   * ImportPath. If you specify only a bucket name, such as
    * s3://import-bucket, you get a 1:1 mapping of file system objects to S3
    * bucket objects. This mapping means that the input data in S3 is overwritten on export.
    * If you provide a custom prefix in the export path, such as
    * s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file
    * system to that export prefix in the Amazon S3 bucket.
+   *
+   * This parameter is not supported for file systems with the Persistent_2 deployment type. Instead,
+   * use CreateDataRepositoryAssociation to create a data repository association to link your Lustre
+   * file system to a data repository.
    */
   ExportPath?: string;
 
@@ -2316,38 +3188,52 @@ export interface CreateFileSystemLustreConfiguration {
   /**
    * For files imported from a data repository, this value determines the stripe count and maximum
    * amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks
    * that a single file can be striped across is limited by the total number of disks that make up
    * the file system.
    *
    * The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB).
    * Amazon S3 objects have a maximum size of 5 TB.
+   *
+   * This parameter is not supported for file systems with the Persistent_2 deployment type. Instead,
+   * use CreateDataRepositoryAssociation to create a data repository association to link your Lustre
+   * file system to a data repository.
    */
   ImportedFileChunkSize?: number;
 
   /**
-   * Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term
-   * processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and
-   * higher burst throughput capacity than SCRATCH_1.
-   *
-   * Choose PERSISTENT_1 deployment type for longer-term storage and workloads and encryption of data
-   * in transit. To learn more about deployment types, see FSx for Lustre Deployment Options.
-   *
-   * Encryption of data in-transit is automatically enabled when you access a SCRATCH_2 or
-   * PERSISTENT_1 file system from Amazon EC2 instances that support this feature.
-   * (Default = SCRATCH_1)
-   *
-   * Encryption of data in-transit for SCRATCH_2 and PERSISTENT_1 deployment types is supported when
-   * accessed from supported instance types in supported Amazon Web Services Regions. To learn more,
-   * Encrypting Data in Transit.
+   * (Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and
+   * shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of
+   * data and higher burst throughput capacity than SCRATCH_1.
+   *
+   * Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren't
+   * latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all
+   * Amazon Web Services Regions in which FSx for Lustre is available.
+   *
+   * Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the
+   * highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher
+   * PerUnitStorageThroughput (up to 1000 MB/s/TiB). PERSISTENT_2 is available in a limited number of
+   * Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services
+   * Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre
+   * in the Amazon FSx for Lustre User Guide.
+   *
+   * If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem
+   * operation fails.
+   *
+   * Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1
+   * and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the
+   * Amazon Web Services Regions where they are available. For more information about encryption in
+   * transit for FSx for Lustre file systems, see Encrypting data in transit in the
+   * Amazon FSx for Lustre User Guide.
+   *
+   * (Default = SCRATCH_1)
    */
   DeploymentType?: LustreDeploymentType | string;
 
   /**
-   * (Optional) When you create your file system, your existing S3 objects appear as file and directory listings.
+   * (Optional) Available with Scratch and Persistent_1 deployment types. When you
+   * create your file system, your existing S3 objects appear as file and directory listings.
    * Use this property to choose how Amazon FSx keeps your file and directory listings up to date
    * as you add or modify objects in your linked S3 bucket. AutoImportPolicy can
    * have the following values:
@@ -2366,24 +3252,46 @@ export interface CreateFileSystemLustreConfiguration {
   *   - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings
   *     of any new objects added to the S3 bucket and any existing objects that are changed in the S3
   *     bucket after you choose this option.
+  *   - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory
+  *     listings of any new objects added to the S3 bucket, any existing objects that are changed in
+  *     the S3 bucket, and any objects that were deleted in the S3 bucket.
                                        • *
                                        - *

                                        For more information, see Automatically import updates from your S3 bucket.

                                        + *

                                        For more information, see + * Automatically import updates from your S3 bucket.

                                        + * + *

                                        This parameter is not supported for file systems with the Persistent_2 deployment type. + * Instead, use CreateDataRepositoryAssociation" to create + * a data repository association to link your Lustre file system to a data repository.

                                        + *
                                        */ AutoImportPolicy?: AutoImportPolicyType | string; /** - *

                                        - * Required for the PERSISTENT_1 deployment type, describes the amount of read and write - * throughput for each 1 tebibyte of storage, in MB/s/TiB. - * File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput - * (MB/s/TiB). For a 2.4 TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput - * yields 120 MB/s of file system throughput. You pay for the amount of throughput that you - * provision. - *

                                        - *

                                        Valid values for SSD storage: 50, 100, 200. Valid values for HDD storage: 12, 40.

                                        + *

                                        Required with PERSISTENT_1 and PERSISTENT_2 deployment + * types, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of + * file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated + * by multiplying file system storage capacity (TiB) by the + * PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, + * provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file + * system throughput. You pay for the amount of throughput that you provision.

                                        + *

                                        Valid values:

                                        + *
                                          + *
                                        • + *

                                          For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.

                                          + *
                                        • + *
                                        • + *

                                          For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.

                                          + *
                                        • + *
                                        • + *

                                          For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB.

                                          + *
                                        • + *
                                        */ PerUnitStorageThroughput?: number; @@ -2395,29 +3303,34 @@ export interface CreateFileSystemLustreConfiguration { DailyAutomaticBackupStartTime?: string; /** - *
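As a minimal sketch of the Lustre options documented above (assuming the usual @aws-sdk/client-fsx package entry point; the subnet ID and bucket names are placeholders), a 2400-GiB PERSISTENT_1 SSD file system provisioned at 50 MB/s/TiB yields 2.4 TiB x 50 MB/s/TiB = 120 MB/s of aggregate throughput:

```ts
import { FSxClient, CreateFileSystemCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

async function createPersistent1Lustre(): Promise<void> {
  // 2400 GiB (2.4 TiB) at 50 MB/s/TiB => 120 MB/s of file system throughput.
  await client.send(
    new CreateFileSystemCommand({
      FileSystemType: "LUSTRE",
      StorageType: "SSD",
      StorageCapacity: 2400, // GiB; valid PERSISTENT_1 SSD sizes are 1200, 2400, then increments of 2400
      SubnetIds: ["subnet-0123456789abcdef0"], // placeholder
      LustreConfiguration: {
        DeploymentType: "PERSISTENT_1",
        PerUnitStorageThroughput: 50, // MB/s/TiB
        ImportPath: "s3://import-bucket", // placeholder bucket
        ExportPath: "s3://import-bucket/export-prefix", // optional prefix in the same bucket as ImportPath
        AutoImportPolicy: "NEW_CHANGED_DELETED",
      },
    })
  );
}

createPersistent1Lustre().catch(console.error);
```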

                                        The number of days to retain automatic backups. Setting this to 0 disables - * automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.

                                        + *

                                        The number of days to retain automatic backups. Setting this property to + * 0 disables automatic backups. You can retain automatic backups for a + * maximum of 90 days. The default is 0.

                                        */ AutomaticBackupRetentionDays?: number; /** - *

                                        (Optional) Not available to use with file systems that are linked to a data repository. - * A boolean flag indicating whether tags for the file system should be copied to - * backups. The default value is false. If it's set to true, all file system - * tags are copied to all automatic and user-initiated backups when the user - * doesn't specify any backup-specific tags. If this value is true, and you specify one or more backup tags, only - * the specified tags are copied to backups. If you specify one or more tags when creating a - * user-initiated backup, no tags are copied from the file system, regardless of this value.

                                        - *

                                        For more information, see Working with backups.

                                        + *

                                        (Optional) Not available for use with file systems that are linked to a data + * repository. A boolean flag indicating whether tags for the file system should be copied + * to backups. The default value is false. If CopyTagsToBackups is set to + * true, all file system tags are copied to all automatic and user-initiated backups when + * the user doesn't specify any backup-specific tags. If + * CopyTagsToBackups is set to true and you specify one or more backup + * tags, only the specified tags are copied to backups. If you specify one or more tags + * when creating a user-initiated backup, no tags are copied from the file system, + * regardless of this value.

                                        + *

                                        (Default = false)

                                        + *

                                        For more information, see + * Working with backups in the Amazon FSx for Lustre User Guide.

                                        */ CopyTagsToBackups?: boolean; /** - *

                                        The type of drive cache used by PERSISTENT_1 file systems that are provisioned with - * HDD storage devices. This parameter is required when storage type is HDD. Set to - * READ, improve the performance for frequently accessed files and allows 20% - * of the total storage capacity of the file system to be cached.

                                        - *

                                        This parameter is required when StorageType is set to HDD.

                                        + *

                                        The type of drive cache used by PERSISTENT_1 file systems that are provisioned with + * HDD storage devices. This parameter is required when storage type is HDD. Set this property to + * READ to improve the performance for frequently accessed files by caching up to 20% + * of the total storage capacity of the file system.

                                        + *

                                        This parameter is required when StorageType is set to HDD.

                                        */ DriveCacheType?: DriveCacheType | string; @@ -2436,9 +3349,17 @@ export interface CreateFileSystemLustreConfiguration { * algorithm.

                                        * *
                                      - *

                                      For more information, see Lustre data compression.

                                      + *

                                      For more information, see Lustre data compression + * in the Amazon FSx for Lustre User Guide.

                                      */ DataCompressionType?: DataCompressionType | string; + + /** + *

                                      The Lustre logging configuration used when creating an Amazon FSx for Lustre + * file system. When logging is enabled, Lustre logs error and warning events for data repositories + * associated with your file system to Amazon CloudWatch Logs.

                                      + */ + LogConfiguration?: LustreLogCreateConfiguration; } export namespace CreateFileSystemLustreConfiguration { @@ -2451,12 +3372,13 @@ export namespace CreateFileSystemLustreConfiguration { } /** - *

                                      The ONTAP configuration properties of the FSx for NetApp ONTAP file system that you are creating.

                                      + *

                                      The ONTAP configuration properties of the FSx for ONTAP file system that you are creating.

                                      */ export interface CreateFileSystemOntapConfiguration { /** - *

                                      The number of days to retain automatic backups. Setting this to 0 disables - * automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.

                                      + *

                                      The number of days to retain automatic backups. Setting this property to + * 0 disables automatic backups. You can retain automatic backups for a + * maximum of 90 days. The default is 0.

                                      */ AutomaticBackupRetentionDays?: number; @@ -2468,7 +3390,8 @@ export interface CreateFileSystemOntapConfiguration { DailyAutomaticBackupStartTime?: string; /** - *

                                      Specifies the ONTAP file system deployment type to use in creating the file system.

                                      + *

                                      Specifies the FSx for ONTAP file system deployment type to use in creating the file system. + * MULTI_AZ_1 is the supported ONTAP deployment type.

                                      */ DeploymentType: OntapDeploymentType | string | undefined; @@ -2480,34 +3403,33 @@ export interface CreateFileSystemOntapConfiguration { EndpointIpAddressRange?: string; /** - *

                                      The ONTAP administrative password for the fsxadmin user that you can - * use to administer your file system using the ONTAP CLI and REST API.

                                      + *

                                      The ONTAP administrative password for the fsxadmin user with which you + * administer your file system using the NetApp ONTAP CLI and REST API.

                                      */ FsxAdminPassword?: string; /** - *

                                      The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system.

                                      + *

                                      The SSD IOPS configuration for the FSx for ONTAP file system.

                                      */ DiskIopsConfiguration?: DiskIopsConfiguration; /** - *

                                      The ID for a subnet. A subnet is a range of IP addresses in - * your virtual private cloud (VPC). For more information, see VPC and Subnets in the - * Amazon VPC User Guide. - *

                                      + *

                                      Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet + * in which you want the preferred file server to be located.

                                      */ PreferredSubnetId?: string; /** - *

                                      Specifies the VPC route tables in which your file system's endpoints will be - * created. You should specify all VPC route tables associated with the subnets - * in which your clients are located. By default, Amazon FSx selects your VPC's + *

                                      Specifies the virtual private cloud (VPC) route tables in which your file system's + * endpoints will be created. You should specify all VPC route tables associated with the + * subnets in which your clients are located. By default, Amazon FSx selects your VPC's * default route table.

                                      */ RouteTableIds?: string[]; /** - *

                                      Sustained throughput of an Amazon FSx file system in MBps.

                                      + *

                                      Sets the throughput capacity for the file system that you're creating. + * Valid values are 512, 1024, and 2048 MBps.
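To tie these ONTAP properties together, here is a hypothetical request shape typed against the interfaces in this file (IDs are placeholders); MULTI_AZ_1 requires two subnets plus a PreferredSubnetId:

```ts
import type { CreateFileSystemRequest } from "@aws-sdk/client-fsx";

// Hypothetical Multi-AZ ONTAP request; all IDs are placeholders.
const ontapRequest: CreateFileSystemRequest = {
  FileSystemType: "ONTAP",
  StorageCapacity: 1024, // GiB; valid range is 1024-196,608
  SubnetIds: ["subnet-aaaa1111", "subnet-bbbb2222"],
  OntapConfiguration: {
    DeploymentType: "MULTI_AZ_1",
    PreferredSubnetId: "subnet-aaaa1111", // where the preferred file server is placed
    ThroughputCapacity: 512, // MBps; valid values are 512, 1024, and 2048
  },
};
```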

                                      */ ThroughputCapacity: number | undefined; @@ -2534,6 +3456,155 @@ export namespace CreateFileSystemOntapConfiguration { }); } +/** + *

                                      The configuration of an Amazon FSx for OpenZFS root volume.

                                      + */ +export interface OpenZFSCreateRootVolumeConfiguration { + /** + *

                                      Specifies the method used to compress the data on the volume. Unless the compression + * type is specified, volumes inherit the DataCompressionType value of their + * parent volume.

                                      + *
                                        + *
                                      • + *

                                        + * NONE - Doesn't compress the data on the volume.

                                        + *
                                      • + *
                                      • + *

                                        + * ZSTD - Compresses the data in the volume using the ZStandard + * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on + * your volume and has very little impact on compute resources.

                                        + *
                                      • + *
                                      + */ + DataCompressionType?: OpenZFSDataCompressionType | string; + + /** + *

                                      The configuration object for mounting a file system.

                                      + */ + NfsExports?: OpenZFSNfsExport[]; + + /** + *

                                      An object specifying how much storage users or groups can use on the volume.

                                      + */ + UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[]; + + /** + *

                                      A Boolean value indicating whether tags for the volume should be copied to snapshots. + * This value defaults to false. If it's set to true, all tags + * for the volume are copied to snapshots where the user doesn't specify tags. If this + * value is true and you specify one or more tags, only the specified tags are + * copied to snapshots. If you specify one or more tags when creating the snapshot, no tags + * are copied from the volume, regardless of this value.

                                      + */ + CopyTagsToSnapshots?: boolean; + + /** + *

                                      A Boolean value indicating whether the volume is read-only. Setting this value to + * true can be useful after you have completed changes to a volume and no + * longer want changes to occur.

                                      + */ + ReadOnly?: boolean; +} + +export namespace OpenZFSCreateRootVolumeConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OpenZFSCreateRootVolumeConfiguration): any => ({ + ...obj, + }); +} + +/** + *

                                      The OpenZFS configuration properties for the file system that you are creating.

                                      + */ +export interface CreateFileSystemOpenZFSConfiguration { + /** + *

                                      The number of days to retain automatic backups. Setting this property to + * 0 disables automatic backups. You can retain automatic backups for a + * maximum of 90 days. The default is 0.

                                      + */ + AutomaticBackupRetentionDays?: number; + + /** + *

                                      A Boolean value indicating whether tags for the file system should be copied to + * backups. This value defaults to false. If it's set to true, + * all tags for the file system are copied to all automatic and user-initiated backups + * where the user doesn't specify tags. If this value is true, and you specify + * one or more tags, only the specified tags are copied to backups. If you specify one or + * more tags when creating a user-initiated backup, no tags are copied from the file + * system, regardless of this value.

                                      + */ + CopyTagsToBackups?: boolean; + + /** + *

                                      A Boolean value indicating whether tags for the volume should be copied to snapshots. + * This value defaults to false. If it's set to true, all tags + * for the volume are copied to snapshots where the user doesn't specify tags. If this + * value is true, and you specify one or more tags, only the specified tags + * are copied to snapshots. If you specify one or more tags when creating the snapshot, no + * tags are copied from the volume, regardless of this value.

                                      + */ + CopyTagsToVolumes?: boolean; + + /** + *

                                      A recurring daily time, in the format HH:MM. HH is the + * zero-padded hour of the day (0-23), and MM is the zero-padded minute of the + * hour. For example, 05:00 specifies 5 AM daily.

                                      + */ + DailyAutomaticBackupStartTime?: string; + + /** + *

                                      Specifies the file system deployment type. Amazon FSx for OpenZFS supports + * SINGLE_AZ_1. SINGLE_AZ_1 is a file system configured for a + * single Availability Zone (AZ) of redundancy.

                                      + */ + DeploymentType: OpenZFSDeploymentType | string | undefined; + + /** + *

                                      Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second + * (MB/s). Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s. + * You pay for additional throughput capacity that you provision.

                                      + */ + ThroughputCapacity: number | undefined; + + /** + *

                                      A recurring weekly time, in the format D:HH:MM.

                                      + *

                                      + * D is the day of the week, for which 1 represents Monday and 7 + * represents Sunday. For further details, see the ISO-8601 spec as described on Wikipedia.

                                      + *

                                      + * HH is the zero-padded hour of the day (0-23), and MM is + * the zero-padded minute of the hour.

                                      + *

                                      For example, 1:05:00 specifies maintenance at 5 AM Monday.

                                      + */ + WeeklyMaintenanceStartTime?: string; + + /** + *

                                      The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The + * default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per + * GB of storage. The configuration consists of the total number of provisioned SSD IOPS + * and how the amount was provisioned (by the customer or by the system).

                                      + */ + DiskIopsConfiguration?: DiskIopsConfiguration; + + /** + *

The configuration Amazon FSx uses when creating the root volume of the Amazon FSx for OpenZFS + * file system. All volumes are children of the root volume.
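A minimal sketch of the OpenZFS file system configuration described here, with values taken from the documented ranges (the subnet ID is a placeholder):

```ts
import type { CreateFileSystemRequest } from "@aws-sdk/client-fsx";

// Hypothetical single-AZ OpenZFS file system with a compressed root volume.
const openZfsRequest: CreateFileSystemRequest = {
  FileSystemType: "OPENZFS",
  StorageCapacity: 64, // GiB; valid range is 64-524,288
  SubnetIds: ["subnet-0123456789abcdef0"], // placeholder
  OpenZFSConfiguration: {
    DeploymentType: "SINGLE_AZ_1",
    ThroughputCapacity: 64, // MB/s; one of 64, 128, 256, 512, 1024, 2048, 3072, 4096
    RootVolumeConfiguration: {
      DataCompressionType: "ZSTD",
      CopyTagsToSnapshots: true,
      ReadOnly: false,
    },
  },
};
```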

                                      + */ + RootVolumeConfiguration?: OpenZFSCreateRootVolumeConfiguration; +} + +export namespace CreateFileSystemOpenZFSConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateFileSystemOpenZFSConfiguration): any => ({ + ...obj, + }); +} + /** *

                                      The Windows file access auditing configuration used when creating * or updating an Amazon FSx for Windows File Server file system.

                                      @@ -2761,7 +3832,7 @@ export interface CreateFileSystemWindowsConfiguration { PreferredSubnetId?: string; /** - *

                                      The throughput of an Amazon FSx file system, measured in megabytes per second, in 2 to + *

                                      Sets the throughput capacity of an Amazon FSx file system, measured in megabytes per second (MB/s), in 2 to * the nth increments, between 2^3 (8) and 2^11 (2048).
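As a sketch of the power-of-two rule (a hypothetical, partial configuration; any of 8, 16, 32, 64, 128, 256, 512, 1024, or 2048 would satisfy it):

```ts
import type { CreateFileSystemWindowsConfiguration } from "@aws-sdk/client-fsx";

// 2^5 = 32 MB/s is one of the allowed steps between 2^3 (8) and 2^11 (2048).
const windowsThroughput: CreateFileSystemWindowsConfiguration = {
  ThroughputCapacity: 32,
};
```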

                                      */ ThroughputCapacity: number | undefined; @@ -2854,26 +3925,31 @@ export interface CreateFileSystemRequest { /** *

                                      A string of up to 64 ASCII characters that Amazon FSx uses to ensure * idempotent creation. This string is automatically filled on your behalf when you use the - * Command Line Interface (CLI) or an Amazon Web Services SDK.

                                      + * Command Line Interface (CLI) or an Amazon Web Services SDK.

                                      */ ClientRequestToken?: string; /** - *

                                      The type of Amazon FSx file system to create. Valid values are WINDOWS, - * LUSTRE, and ONTAP.

                                      + *

                                      The type of Amazon FSx file system to create. Valid values are + * WINDOWS, LUSTRE, ONTAP, and + * OPENZFS.

                                      */ FileSystemType: FileSystemType | string | undefined; /** - *

                                      Sets the storage capacity of the file system that you're creating.

                                      - *

                                      For Lustre file systems:

                                      + *

                                      Sets the storage capacity of the file system that you're creating, in gibibytes (GiB).

                                      + *

                                      + * FSx for Lustre file systems - The amount of + * storage capacity that you can configure depends on the value that you set for + * StorageType and the Lustre DeploymentType, as + * follows:

                                      *
                                        *
                                      • - *

                                        For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are - * 1200 GiB, 2400 GiB, and increments of 2400 GiB.

                                        + *

                                        For SCRATCH_2, PERSISTENT_2 and PERSISTENT_1 deployment types + * using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.

                                        *
                                      • *
                                      • - *

                                        For PERSISTENT HDD file systems, valid values are increments of 6000 GiB for + *

                                        For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for * 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.

                                        *
                                      • *
                                      • @@ -2881,31 +3957,34 @@ export interface CreateFileSystemRequest { * 1200 GiB, 2400 GiB, and increments of 3600 GiB.

                                        *
                                      • *
                                      - *

                                      For Windows file systems:

                                      + *

                                      + * FSx for ONTAP file systems - The amount of storage capacity + * that you can configure is from 1024 GiB up to 196,608 GiB (192 TiB).

                                      + *

                                      + * FSx for OpenZFS file systems - The amount of storage capacity that + * you can configure is from 64 GiB up to 524,288 GiB (512 TiB).

                                      + *

                                      + * FSx for Windows File Server file systems - The amount + * of storage capacity that you can configure depends on the value that you set for + * StorageType as follows:

                                      *
                                        *
                                      • - *

                                        If StorageType=SSD, valid values are 32 GiB - 65,536 GiB (64 TiB).

                                        - *
                                      • - *
                                      • - *

                                        If StorageType=HDD, valid values are 2000 GiB - 65,536 GiB (64 TiB).

                                        + *

                                        For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).

                                        *
                                      • - *
                                      - *

                                      For ONTAP file systems:

                                      - *
                                        *
                                      • - *

                                        Valid values are 1024 GiB - 196,608 GiB (192 TiB).

                                        + *

                                        For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).

                                        *
                                      • *
                                      */ StorageCapacity: number | undefined; /** - *

                                      Sets the storage type for the file system you're creating. - * Valid values are SSD and HDD.

                                      + *

                                      Sets the storage type for the file system that you're creating. Valid values are + * SSD and HDD.

                                      *
                                        *
                                      • - *

                                        Set to SSD to use solid state drive storage. - * SSD is supported on all Windows, Lustre, and ONTAP deployment types.

                                        + *

                                        Set to SSD to use solid state drive storage. SSD is supported on all Windows, + * Lustre, ONTAP, and OpenZFS deployment types.

                                        *
                                      • *
                                      • *

                                        Set to HDD to use hard disk drive storage. @@ -2914,29 +3993,27 @@ export interface CreateFileSystemRequest { *

                                        *
                                      • *
                                      - *

                                      - * Default value is SSD. For more information, see - * - * Storage Type Options in the Amazon FSx for Windows User Guide and - * Multiple Storage Options - * in the Amazon FSx for Lustre User Guide. - *

                                      + *

                                      Default value is SSD. For more information, see Storage + * type options in the FSx for Windows File Server User + * Guide and Multiple storage + * options in the FSx for Lustre User + * Guide.

                                      */ StorageType?: StorageType | string; /** - *

                                      Specifies the IDs of the subnets that the file system will be accessible from. For Windows - * and ONTAP MULTI_AZ_1 file system deployment types, provide exactly two subnet IDs, - * one for the preferred file server and one for the standby file server. You specify one of these - * subnets as the preferred subnet using the WindowsConfiguration > PreferredSubnetID - * or OntapConfiguration > PreferredSubnetID properties. For more information, - * see + *

                                      Specifies the IDs of the subnets that the file system will be accessible from. For + * Windows and ONTAP MULTI_AZ_1 deployment types,provide exactly two subnet + * IDs, one for the preferred file server and one for the standby file server. You specify + * one of these subnets as the preferred subnet using the WindowsConfiguration > + * PreferredSubnetID or OntapConfiguration > PreferredSubnetID + * properties. For more information about Multi-AZ file system configuration, see * Availability and durability: Single-AZ and Multi-AZ file systems in the - * Amazon FSx for Windows User Guide and - * - * Availability and durability in the - * Amazon FSx for ONTAP User Guide.

                                      - *

                                      For Windows SINGLE_AZ_1 and SINGLE_AZ_2 file system deployment types and Lustre file systems, provide exactly one subnet ID. + * Amazon FSx for Windows User Guide and + * Availability and durability in the Amazon FSx for ONTAP User + * Guide.

                                      + *

                                      For Windows SINGLE_AZ_1 and SINGLE_AZ_2 and all Lustre + * deployment types, provide exactly one subnet ID. * The file server is launched in that subnet's Availability Zone.

                                      */ SubnetIds: string[] | undefined; @@ -2949,54 +4026,93 @@ export interface CreateFileSystemRequest { SecurityGroupIds?: string[]; /** - *

                                      The tags to apply to the file system being created. The key value of - * the Name tag appears in the console as the file system name.

                                      + *

                                      The tags to apply to the file system that's being created. The key value of the + * Name tag appears in the console as the file system name.

                                      */ Tags?: Tag[]; /** - *

                                      The ID of the Key Management Service (KMS) key used to encrypt the file system's data - * for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and - * Amazon FSx for Lustre PERSISTENT_1 file systems at rest. If not specified, the Amazon FSx - * managed key is used. The Amazon FSx for Lustre SCRATCH_1 and SCRATCH_2 file systems - * are always encrypted at rest using Amazon FSx managed keys. For more information, see Encrypt + *

                                      The ID of the Key Management Service (KMS) key used to encrypt the file + * system's data for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and Amazon FSx for Lustre + * PERSISTENT_1 and PERSISTENT_2 file systems at rest. If this ID + * isn't specified, the key managed by Amazon FSx is used. The Amazon FSx for Lustre + * SCRATCH_1 and SCRATCH_2 file systems are always encrypted at + * rest using Amazon FSx-managed keys. For more information, see Encrypt * in the Key Management Service API Reference.

                                      */ KmsKeyId?: string; /** - *

                                      The Microsoft Windows configuration for the file system being created. - *

                                      + *

                                      The Microsoft Windows configuration for the file system that's being created.

                                      */ WindowsConfiguration?: CreateFileSystemWindowsConfiguration; /** - *

                                      The Lustre configuration for the file system being created. - *

                                      + *

                                      The Lustre configuration for the file system being created.

                                      + * + *

                                      The following parameters are not supported for file systems with the Persistent_2 + * deployment type. Instead, use CreateDataRepositoryAssociation + * to create a data repository association to link your Lustre file system to a data repository.

                                      + *
                                        + *
                                      • + *

                                        + * AutoImportPolicy + *

                                        + *
                                      • + *
                                      • + *

                                        + * ExportPath + *

                                        + *
                                      • + *
                                      • + *

+ * ImportedFileChunkSize + *

                                        + *
                                      • + *
                                      • + *

                                        + * ImportPath + *

                                        + *
                                      • + *
                                      + *
                                      */ LustreConfiguration?: CreateFileSystemLustreConfiguration; /** - *

                                      The ONTAP configuration properties of the FSx for NetApp ONTAP file system that you are creating.

                                      + *

                                      The ONTAP configuration properties of the FSx for ONTAP file system that you are creating.

                                      */ OntapConfiguration?: CreateFileSystemOntapConfiguration; /** - *

                                      Sets the version of the Amazon FSx for Lustre file system you're creating. - * Valid values are 2.10 and 2.12.

                                      + *

                                      (Optional) For FSx for Lustre file systems, sets the Lustre version for the + * file system that you're creating. Valid values are 2.10 and + * 2.12:

                                      + * *
                                        *
                                      • - *

                                        Set the value to 2.10 to create a Lustre 2.10 - * file system.

                                        + *

                                        2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

                                        *
                                      • *
                                      • - *

                                        Set the value to 2.12 to create a Lustre 2.12 - * file system.

                                        + *

                                        2.12 is supported by all Lustre deployment types. 2.12 is + * required when setting FSx for Lustre DeploymentType to + * PERSISTENT_2.

                                        *
                                      • *
                                      - *

                                      Default value is 2.10.

                                      + *

Default value = 2.10, except when DeploymentType is set to + * PERSISTENT_2, in which case the default is 2.12.

                                      + * + *

                                      If you set FileSystemTypeVersion to 2.10 for a + * PERSISTENT_2 Lustre deployment type, the CreateFileSystem + * operation fails.
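For example (a sketch with placeholder IDs), a PERSISTENT_2 deployment has to pair with FileSystemTypeVersion 2.12, or leave the version unset so it defaults to 2.12:

```ts
import type { CreateFileSystemRequest } from "@aws-sdk/client-fsx";

// PERSISTENT_2 requires Lustre 2.12; setting "2.10" here would make
// the CreateFileSystem operation fail, as noted above.
const persistent2Request: CreateFileSystemRequest = {
  FileSystemType: "LUSTRE",
  FileSystemTypeVersion: "2.12",
  StorageType: "SSD",
  StorageCapacity: 2400, // GiB
  SubnetIds: ["subnet-0123456789abcdef0"], // placeholder
  LustreConfiguration: {
    DeploymentType: "PERSISTENT_2",
    PerUnitStorageThroughput: 250, // MB/s/TiB; 125, 250, 500, or 1000 for PERSISTENT_2
  },
};
```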

                                      + *
                                      */ FileSystemTypeVersion?: string; + + /** + *

                                      The OpenZFS configuration for the file system that's being created.

                                      + */ + OpenZFSConfiguration?: CreateFileSystemOpenZFSConfiguration; } export namespace CreateFileSystemRequest { @@ -3140,14 +4256,14 @@ export namespace MissingFileSystemConfiguration { */ export interface CreateFileSystemFromBackupRequest { /** - *

                                      The ID of the source backup. Specifies the backup you are copying.

                                      + *

                                      The ID of the source backup. Specifies the backup that you are copying.

                                      */ BackupId: string | undefined; /** *

                                      A string of up to 64 ASCII characters that Amazon FSx uses to ensure * idempotent creation. This string is automatically filled on your behalf when you use the - * Command Line Interface (CLI) or an Amazon Web Services SDK.

                                      + * Command Line Interface (CLI) or an Amazon Web Services SDK.

                                      */ ClientRequestToken?: string; @@ -3156,16 +4272,16 @@ export interface CreateFileSystemFromBackupRequest { * file system deployment types, provide exactly two subnet IDs, one for the preferred file server * and one for the standby file server. You specify one of these subnets as the preferred subnet * using the WindowsConfiguration > PreferredSubnetID property.

                                      - *

                                      For Windows SINGLE_AZ_1 and SINGLE_AZ_2 deployment - * types and Lustre file systems, provide exactly one subnet ID. - * The file server is launched in that subnet's Availability Zone.

                                      + *

For Windows SINGLE_AZ_1 and SINGLE_AZ_2 file system deployment + * types, Lustre file systems, and OpenZFS file systems, provide exactly one subnet ID. The + * file server is launched in that subnet's Availability Zone.

                                      */ SubnetIds: string[] | undefined; /** - *

                                      A list of IDs for the security groups that apply to the specified network - * interfaces created for file system access. These security groups apply to all network - * interfaces. This value isn't returned in later DescribeFileSystem requests.

                                      + *

                                      A list of IDs for the security groups that apply to the specified network interfaces + * created for file system access. These security groups apply to all network interfaces. + * This value isn't returned in later DescribeFileSystem requests.

                                      */ SecurityGroupIds?: string[]; @@ -3182,57 +4298,85 @@ export interface CreateFileSystemFromBackupRequest { WindowsConfiguration?: CreateFileSystemWindowsConfiguration; /** - *

                                      The Lustre configuration for the file system being created. - *

                                      + *

                                      The Lustre configuration for the file system being created.

                                      + * + *

                                      The following parameters are not supported for file systems with the Persistent_2 + * deployment type. Instead, use CreateDataRepositoryAssociation + * to create a data repository association to link your Lustre file system to a data repository.

                                      + *
                                        + *
                                      • + *

                                        + * AutoImportPolicy + *

                                        + *
                                      • + *
                                      • + *

                                        + * ExportPath + *

                                        + *
                                      • + *
                                      • + *

+ * ImportedFileChunkSize + *

                                        + *
                                      • + *
                                      • + *

                                        + * ImportPath + *

                                        + *
                                      • + *
                                      + *
                                      */ LustreConfiguration?: CreateFileSystemLustreConfiguration; /** - *

                                      Sets the storage type for the Windows file system you're creating from a backup. - * Valid values are SSD and HDD.

                                      + *

                                      Sets the storage type for the Windows or OpenZFS file system that you're creating from + * a backup. Valid values are SSD and HDD.

                                      *
                                        *
                                      • - *

                                        Set to SSD to use solid state drive storage. - * Supported on all Windows deployment types.

                                        + *

                                        Set to SSD to use solid state drive storage. SSD is supported on all Windows and OpenZFS + * deployment types.

                                        *
                                      • *
                                      • *

                                        Set to HDD to use hard disk drive storage. - * Supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types. - *

                                        + * HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 FSx for Windows File Server file system deployment types.

                                        *
                                      • *
                                      - *

                                      - * Default value is SSD. - *

                                      + *

                                      The default value is SSD.

                                      * *

                                      HDD and SSD storage types have different minimum storage capacity requirements. * A restored file system's storage capacity is tied to the file system that was backed up. * You can create a file system that uses HDD storage from a backup of a file system that - * used SSD storage only if the original SSD file system had a storage capacity of at least 2000 GiB. - *

                                      + * used SSD storage if the original SSD file system had a storage capacity of at least 2000 GiB.

                                      *
                                      */ StorageType?: StorageType | string; /** - *

                                      The ID of the Key Management Service (KMS) key used to encrypt the file system's data - * for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and - * Amazon FSx for Lustre PERSISTENT_1 file systems at rest. If not specified, the Amazon FSx - * managed key is used. The Amazon FSx for Lustre SCRATCH_1 and SCRATCH_2 file systems - * are always encrypted at rest using Amazon FSx managed keys. For more information, see Encrypt + *

                                      The ID of the Key Management Service (KMS) key used to encrypt the file + * system's data for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and Amazon FSx for Lustre + * PERSISTENT_1 and PERSISTENT_2 file systems at rest. If this ID + * isn't specified, the key managed by Amazon FSx is used. The Amazon FSx for Lustre + * SCRATCH_1 and SCRATCH_2 file systems are always encrypted at + * rest using Amazon FSx-managed keys. For more information, see Encrypt * in the Key Management Service API Reference.

                                      */ KmsKeyId?: string; /** - *

                                      Sets the version for the Amazon FSx for Lustre file system you're creating from a backup. - * Valid values are 2.10 and 2.12.

                                      + *

                                      Sets the version for the Amazon FSx for Lustre file system that you're + * creating from a backup. Valid values are 2.10 and 2.12.

                                      *

                                      You don't need to specify FileSystemTypeVersion because it will * be applied using the backup's FileSystemTypeVersion setting. * If you choose to specify FileSystemTypeVersion when creating from backup, the * value must match the backup's FileSystemTypeVersion setting.

                                      */ FileSystemTypeVersion?: string; + + /** + *

                                      The OpenZFS configuration for the file system that's being created.
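A minimal sketch of the corresponding call (assuming the CreateFileSystemFromBackupCommand wrapper in @aws-sdk/client-fsx; the backup and subnet IDs are placeholders). The restored file system inherits FileSystemTypeVersion and most configuration from the backup unless overridden:

```ts
import { FSxClient, CreateFileSystemFromBackupCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({});

async function restoreFromBackup(): Promise<void> {
  await client.send(
    new CreateFileSystemFromBackupCommand({
      BackupId: "backup-0123456789abcdef0", // placeholder
      SubnetIds: ["subnet-0123456789abcdef0"], // placeholder
    })
  );
}

restoreFromBackup().catch(console.error);
```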

                                      + */ + OpenZFSConfiguration?: CreateFileSystemOpenZFSConfiguration; } export namespace CreateFileSystemFromBackupRequest { @@ -3247,6 +4391,39 @@ export namespace CreateFileSystemFromBackupRequest { }); } +export interface CreateSnapshotRequest { + /** + *

                                      (Optional) An idempotency token for resource creation, in a string of up to 64 + * ASCII characters. This token is automatically filled on your behalf when you use the + * Command Line Interface (CLI) or an Amazon Web Services SDK.

                                      + */ + ClientRequestToken?: string; + + /** + *

                                      The name of the snapshot.

                                      + */ + Name: string | undefined; + + /** + *

                                      The ID of the volume that you are taking a snapshot of.

                                      + */ + VolumeId: string | undefined; + + /** + *

                                      A list of Tag values, with a maximum of 50 elements.
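A sketch of the CreateSnapshotRequest shape defined here (the volume ID is a placeholder; ClientRequestToken can be omitted and is filled in by the CLI or SDK):

```ts
import type { CreateSnapshotRequest } from "@aws-sdk/client-fsx";

// Snapshot an existing OpenZFS volume; the volume ID is a placeholder.
const snapshotRequest: CreateSnapshotRequest = {
  Name: "nightly-2021-11-30",
  VolumeId: "fsvol-0123456789abcdef0",
  Tags: [{ Key: "Schedule", Value: "nightly" }],
};
```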

                                      + */ + Tags?: Tag[]; +} + +export namespace CreateSnapshotRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateSnapshotRequest): any => ({ + ...obj, + }); +} + /** *

                                      The configuration that Amazon FSx uses to join the ONTAP storage virtual machine * (SVM) to your self-managed (including on-premises) Microsoft Active Directory (AD) directory.

                                      @@ -3669,56 +4846,168 @@ export namespace CreateOntapVolumeConfiguration { }); } -export interface CreateVolumeRequest { +/** + *

                                      The snapshot configuration to use when creating an OpenZFS volume from a snapshot.

                                      + */ +export interface CreateOpenZFSOriginSnapshotConfiguration { /** - *

                                      (Optional) An idempotency token for resource creation, in a string of up to 64 - * ASCII characters. This token is automatically filled on your behalf when you use the - * Command Line Interface (CLI) or an Amazon Web Services SDK.

                                      - */ - ClientRequestToken?: string; + *

                                      The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services + * resources. We require an ARN when you need to specify a resource unambiguously across + * all of Amazon Web Services. For more information, see Amazon Resource Names (ARNs) in + * the Amazon Web Services General Reference.

                                      + */ + SnapshotARN: string | undefined; /** - *

                                      Specifies the type of volume to create; ONTAP is the only valid volume type.

                                      + *

                                      The strategy used when copying data from the snapshot to the new volume.

                                      + *
                                        + *
                                      • + *

                                        + * CLONE - The new volume references the data in the origin + * snapshot. Cloning a snapshot is faster than copying data from the snapshot to a + * new volume and doesn't consume disk throughput. However, the origin snapshot + * can't be deleted if there is a volume using its copied data.

                                        + *
                                      • + *
                                      • + *

                                        + * FULL_COPY - Copies all data from the snapshot to the new volume. + *

                                        + *
                                      • + *
                                      */ - VolumeType: VolumeType | string | undefined; + CopyStrategy: OpenZFSCopyStrategy | string | undefined; +} +export namespace CreateOpenZFSOriginSnapshotConfiguration { /** - *

                                      Specifies the name of the volume you're creating.

                                      + * @internal */ - Name: string | undefined; + export const filterSensitiveLog = (obj: CreateOpenZFSOriginSnapshotConfiguration): any => ({ + ...obj, + }); +} + +/** + *

                                      Specifies the configuration of the OpenZFS volume that you are creating.

                                      + */ +export interface CreateOpenZFSVolumeConfiguration { + /** + *

                                      The ID of the volume to use as the parent volume.

                                      + */ + ParentVolumeId: string | undefined; /** - *

                                      Specifies the ONTAP configuration to use in creating the volume.

                                      + *

                                      The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't + * reserve more storage than the parent volume has reserved.

                                      */ - OntapConfiguration?: CreateOntapVolumeConfiguration; + StorageCapacityReservationGiB?: number; /** - *

                                      A list of Tag values, with a maximum of 50 elements.

                                      + *

                                      The maximum amount of storage in gibibytes (GiB) that the volume can use from its + * parent. You can specify a quota larger than the storage on the parent volume.

                                      */ - Tags?: Tag[]; + StorageCapacityQuotaGiB?: number; + + /** + *

                                      Specifies the method used to compress the data on the volume. Unless the compression + * type is specified, volumes inherit the DataCompressionType value of their + * parent volume.

                                      + *
                                        + *
                                      • + *

                                        + * NONE - Doesn't compress the data on the volume.

                                        + *
                                      • + *
                                      • + *

                                        + * ZSTD - Compresses the data in the volume using the Zstandard + * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on + * your volume and has very little impact on compute resources.

                                        + *
                                      • + *
                                      + */ + DataCompressionType?: OpenZFSDataCompressionType | string; + + /** + *

                                      A Boolean value indicating whether tags for the volume should be copied to snapshots. + * This value defaults to false. If it's set to true, all tags + * for the volume are copied to snapshots where the user doesn't specify tags. If this + * value is true, and you specify one or more tags, only the specified tags + * are copied to snapshots. If you specify one or more tags when creating the snapshot, no + * tags are copied from the volume, regardless of this value.

                                      + */ + CopyTagsToSnapshots?: boolean; + + /** + *

                                      The configuration object that specifies the snapshot to use as the origin of the data + * for the volume.

                                      + */ + OriginSnapshot?: CreateOpenZFSOriginSnapshotConfiguration; + + /** + *

                                      A Boolean value indicating whether the volume is read-only.

                                      + */ + ReadOnly?: boolean; + + /** + *

                                      The configuration object for mounting a Network File System (NFS) file system.

                                      + */ + NfsExports?: OpenZFSNfsExport[]; + + /** + *

                                      An object specifying how much storage users or groups can use on the volume.

                                      + */ + UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[]; } -export namespace CreateVolumeRequest { +export namespace CreateOpenZFSVolumeConfiguration { /** * @internal */ - export const filterSensitiveLog = (obj: CreateVolumeRequest): any => ({ + export const filterSensitiveLog = (obj: CreateOpenZFSVolumeConfiguration): any => ({ ...obj, }); } -export interface CreateVolumeResponse { +export interface CreateVolumeRequest { /** - *

   /**
-   * Returned after a successful CreateVolume API operation, describing the volume just created.
+   * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII
+   * characters. This token is automatically filled on your behalf when you use the Command Line
+   * Interface (CLI) or an Amazon Web Services SDK.
    */
-  Volume?: Volume;
+  ClientRequestToken?: string;
+
+  /**
+   * Specifies the type of volume to create; ONTAP and OPENZFS are the only valid volume types.
+   */
+  VolumeType: VolumeType | string | undefined;
+
+  /**
+   * Specifies the name of the volume that you're creating.
+   */
+  Name: string | undefined;
+
+  /**
+   * Specifies the configuration to use when creating the ONTAP volume.
+   */
+  OntapConfiguration?: CreateOntapVolumeConfiguration;
+
+  /**
+   * A list of Tag values, with a maximum of 50 elements.
+   */
+  Tags?: Tag[];
+
+  /**
+   * Specifies the configuration to use when creating the OpenZFS volume.
+   */
+  OpenZFSConfiguration?: CreateOpenZFSVolumeConfiguration;
 }
 
-export namespace CreateVolumeResponse {
+export namespace CreateVolumeRequest {
   /**
    * @internal
    */
-  export const filterSensitiveLog = (obj: CreateVolumeResponse): any => ({
+  export const filterSensitiveLog = (obj: CreateVolumeRequest): any => ({
     ...obj,
   });
 }
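// Illustrative only (not part of the generated patch): a minimal sketch of how the new
// CreateVolumeRequest shape could be used for an OpenZFS volume. It assumes the FSxClient and
// CreateVolumeCommand exports that normally accompany these models in @aws-sdk/client-fsx, and a
// ParentVolumeId field on CreateOpenZFSVolumeConfiguration; all IDs and values are placeholders.
import { FSxClient, CreateVolumeCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

async function createOpenZfsVolume(): Promise<void> {
  const response = await client.send(
    new CreateVolumeCommand({
      VolumeType: "OPENZFS",
      Name: "example-volume",
      OpenZFSConfiguration: {
        ParentVolumeId: "fsvol-0123456789abcdef0", // assumed field; placeholder ID
        DataCompressionType: "ZSTD",
        CopyTagsToSnapshots: false,
      },
    })
  );
  console.log(response);
}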

@@ -3767,7 +5056,7 @@ export namespace StorageVirtualMachineNotFound {
 
 export interface CreateVolumeFromBackupRequest {
   /**
-   * The ID of the source backup. Specifies the backup you are copying.
+   * The ID of the source backup. Specifies the backup that you are copying.
    */
   BackupId: string | undefined;
 
@@ -3803,23 +5092,6 @@ export namespace CreateVolumeFromBackupRequest {
   });
 }
 
-export interface CreateVolumeFromBackupResponse {
-  /**
-   * Returned after a successful CreateVolumeFromBackup API operation, describing the volume just created.
-   */
-  Volume?: Volume;
-}
-
-export namespace CreateVolumeFromBackupResponse {
-  /** @internal */
-  export const filterSensitiveLog = (obj: CreateVolumeFromBackupResponse): any => ({ ...obj });
-}
-
 /**
  * You can't delete a backup while it's being copied.
  */
@@ -3832,7 +5104,7 @@ export interface BackupBeingCopied extends __SmithyException, $MetadataBearer {
   Message?: string;
 
   /**
-   * The ID of the source backup. Specifies the backup you are copying.
+   * The ID of the source backup. Specifies the backup that you are copying.
    */
   BackupId?: string;
 }
@@ -3874,17 +5146,17 @@ export namespace BackupRestoring {
 }
 
 /**
- * The request object for DeleteBackup operation.
+ * The request object for the DeleteBackup operation.
  */
 export interface DeleteBackupRequest {
   /**
-   * The ID of the backup you want to delete.
+   * The ID of the backup that you want to delete.
    */
   BackupId: string | undefined;
 
   /**
    * A string of up to 64 ASCII characters that Amazon FSx uses to ensure
-   * idempotent deletion. This is automatically filled on your behalf when using
+   * idempotent deletion. This parameter is automatically filled on your behalf when using
    * the CLI or SDK.
    */
   ClientRequestToken?: string;
@@ -3900,16 +5172,17 @@ export namespace DeleteBackupRequest {
 }
 
 /**
- * The response object for DeleteBackup operation.
+ * The response object for the DeleteBackup operation.
  */
 export interface DeleteBackupResponse {
   /**
-   * The ID of the backup deleted.
+   * The ID of the backup that was deleted.
    */
   BackupId?: string;
 
   /**
-   * The lifecycle of the backup. Should be DELETED.
+   * The lifecycle status of the backup. If the DeleteBackup operation is successful, the status
+   * is DELETED.
    */
   Lifecycle?: BackupLifecycle | string;
 }
 
@@ -3923,6 +5196,83 @@ export namespace DeleteBackupResponse {
   });
 }

+/**
+ * No data repository associations were found based upon the supplied parameters.
+ */
+export interface DataRepositoryAssociationNotFound extends __SmithyException, $MetadataBearer {
+  name: "DataRepositoryAssociationNotFound";
+  $fault: "client";
+  /**
+   * A detailed error message.
+   */
+  Message?: string;
+}
+
+export namespace DataRepositoryAssociationNotFound {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DataRepositoryAssociationNotFound): any => ({ ...obj });
+}
+
+export interface DeleteDataRepositoryAssociationRequest {
+  /**
+   * The ID of the data repository association that you want to delete.
+   */
+  AssociationId: string | undefined;
+
+  /**
+   * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII
+   * characters. This token is automatically filled on your behalf when you use the Command Line
+   * Interface (CLI) or an Amazon Web Services SDK.
+   */
+  ClientRequestToken?: string;
+
+  /**
+   * Set to true to delete the data in the file system that corresponds to the data repository
+   * association.
+   */
+  DeleteDataInFileSystem: boolean | undefined;
+}
+
+export namespace DeleteDataRepositoryAssociationRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteDataRepositoryAssociationRequest): any => ({ ...obj });
+}
+
+export interface DeleteDataRepositoryAssociationResponse {
+  /**
+   * The ID of the data repository association being deleted.
+   */
+  AssociationId?: string;
+
+  /**
+   * Describes the lifecycle state of the data repository association being deleted.
+   */
+  Lifecycle?: DataRepositoryLifecycle | string;
+
+  /**
+   * Indicates whether data in the file system that corresponds to the data repository
+   * association is being deleted. Default is false.
+   */
+  DeleteDataInFileSystem?: boolean;
+}
+
+export namespace DeleteDataRepositoryAssociationResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteDataRepositoryAssociationResponse): any => ({ ...obj });
+}
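// Illustrative only (not part of the generated patch): a minimal sketch of deleting a data
// repository association with the request shape above. It assumes the FSxClient and
// DeleteDataRepositoryAssociationCommand exports that normally accompany these models in
// @aws-sdk/client-fsx; the association ID is a placeholder.
import { FSxClient, DeleteDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx";

const fsx = new FSxClient({ region: "us-east-1" });

async function deleteAssociation(): Promise<void> {
  const result = await fsx.send(
    new DeleteDataRepositoryAssociationCommand({
      AssociationId: "dra-0123456789abcdef0", // placeholder ID
      DeleteDataInFileSystem: false, // keep the corresponding data in the file system
    })
  );
  console.log(result.Lifecycle); // lifecycle state of the association being deleted
}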

 /**
  * The configuration object for the Amazon FSx for Lustre file system being deleted in the
  * DeleteFileSystem operation.
  */
@@ -3932,6 +5282,9 @@ export interface DeleteFileSystemLustreConfiguration {
   /**
    * Set SkipFinalBackup to false if you want to take a final backup of the file system you are
    * deleting. By default, Amazon FSx will not take a final backup on your behalf when the
    * DeleteFileSystem operation is invoked. (Default = true)
+   *
+   * Note: The fsx:CreateBackup permission is required if you set SkipFinalBackup to false in
+   * order to delete the file system and take a final backup.
    */
   SkipFinalBackup?: boolean;
 
@@ -3953,6 +5306,36 @@ export namespace DeleteFileSystemLustreConfiguration {
   });
 }
 
+/**
+ * The configuration object for the OpenZFS file system used in the DeleteFileSystem operation.
+ */
+export interface DeleteFileSystemOpenZFSConfiguration {
+  /**
+   * By default, Amazon FSx for OpenZFS takes a final backup on your behalf when the
+   * DeleteFileSystem operation is invoked. Doing this helps protect you from data loss, and we
+   * highly recommend taking the final backup. If you want to skip this backup, use this value
+   * to do so.
+   */
+  SkipFinalBackup?: boolean;
+
+  /**
+   * A list of Tag values, with a maximum of 50 elements.
+   */
+  FinalBackupTags?: Tag[];
+}
+
+export namespace DeleteFileSystemOpenZFSConfiguration {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteFileSystemOpenZFSConfiguration): any => ({ ...obj });
+}
+
 /**
  * The configuration object for the Microsoft Windows file system used in the
  * DeleteFileSystem operation.
  */
@@ -3986,14 +5369,14 @@ export namespace DeleteFileSystemWindowsConfiguration {
  */
 export interface DeleteFileSystemRequest {
   /**
-   * The ID of the file system you want to delete.
+   * The ID of the file system that you want to delete.
    */
   FileSystemId: string | undefined;
 
   /**
    * A string of up to 64 ASCII characters that Amazon FSx uses to ensure
-   * idempotent deletion. This is automatically filled on your behalf when using the
+   * idempotent deletion. This token is automatically filled on your behalf when using the
    * Command Line Interface (CLI) or an Amazon Web Services SDK.
    */
   ClientRequestToken?: string;
 
@@ -4008,6 +5391,12 @@ export interface DeleteFileSystemRequest {
    * DeleteFileSystem operation.
    */
   LustreConfiguration?: DeleteFileSystemLustreConfiguration;
+
+  /**
+   * The configuration object for the OpenZFS file system used in the DeleteFileSystem operation.
+   */
+  OpenZFSConfiguration?: DeleteFileSystemOpenZFSConfiguration;
 }
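// Illustrative only (not part of the generated patch): a request literal that deletes an OpenZFS
// file system while keeping the final backup and tagging it, using the shapes defined above.
// Assumes the model types are re-exported from @aws-sdk/client-fsx as with the other generated
// clients; the file system ID is a placeholder.
import type { DeleteFileSystemRequest } from "@aws-sdk/client-fsx";

const deleteFileSystemInput: DeleteFileSystemRequest = {
  FileSystemId: "fs-0123456789abcdef0", // placeholder ID
  OpenZFSConfiguration: {
    SkipFinalBackup: false, // keep the final backup that FSx for OpenZFS takes by default
    FinalBackupTags: [{ Key: "deleted-by", Value: "cleanup-script" }],
  },
};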

 export namespace DeleteFileSystemRequest {
@@ -4044,6 +5433,31 @@ export namespace DeleteFileSystemLustreResponse {
   });
 }
 
+/**
+ * The response object for the Amazon FSx for OpenZFS file system that's being deleted in the
+ * DeleteFileSystem operation.
+ */
+export interface DeleteFileSystemOpenZFSResponse {
+  /**
+   * The ID of the source backup. Specifies the backup that you are copying.
+   */
+  FinalBackupId?: string;
+
+  /**
+   * A list of Tag values, with a maximum of 50 elements.
+   */
+  FinalBackupTags?: Tag[];
+}
+
+export namespace DeleteFileSystemOpenZFSResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteFileSystemOpenZFSResponse): any => ({ ...obj });
+}
+
 /**
  * The response object for the Microsoft Windows file system used in the
  * DeleteFileSystem operation.
  */
@@ -4074,13 +5488,14 @@
  */
 export interface DeleteFileSystemResponse {
   /**
-   * The ID of the file system being deleted.
+   * The ID of the file system that's being deleted.
    */
   FileSystemId?: string;
 
   /**
-   * The file system lifecycle for the deletion request. Should be DELETING.
+   * The file system lifecycle for the deletion request. If the DeleteFileSystem operation is
+   * successful, this status is DELETING.
    */
   Lifecycle?: FileSystemLifecycle | string;
 
@@ -4095,6 +5510,12 @@ export interface DeleteFileSystemResponse {
    * DeleteFileSystem operation.
    */
   LustreResponse?: DeleteFileSystemLustreResponse;
+
+  /**
+   * The response object for the OpenZFS file system that's being deleted in the
+   * DeleteFileSystem operation.
+   */
+  OpenZFSResponse?: DeleteFileSystemOpenZFSResponse;
 }
 
 export namespace DeleteFileSystemResponse {
@@ -4106,7 +5527,7 @@ export namespace DeleteFileSystemResponse {
   });
 }
 
-export interface DeleteStorageVirtualMachineRequest {
+export interface DeleteSnapshotRequest {

   /**
    * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII
    * characters. This token is automatically filled on your behalf when you use the
    * Command Line Interface (CLI) or an Amazon Web Services SDK.
    */
@@ -4115,67 +5536,64 @@
   ClientRequestToken?: string;
 
   /**
-   * The ID of the SVM that you want to delete.
+   * The ID of the snapshot that you want to delete.
    */
-  StorageVirtualMachineId: string | undefined;
+  SnapshotId: string | undefined;
 }
 
-export namespace DeleteStorageVirtualMachineRequest {
+export namespace DeleteSnapshotRequest {
   /** @internal */
-  export const filterSensitiveLog = (obj: DeleteStorageVirtualMachineRequest): any => ({ ...obj });
+  export const filterSensitiveLog = (obj: DeleteSnapshotRequest): any => ({ ...obj });
 }
 
-export interface DeleteStorageVirtualMachineResponse {
+export interface DeleteSnapshotResponse {
   /**
-   * The ID of the SVM Amazon FSx is deleting.
+   * The ID of the deleted snapshot.
    */
-  StorageVirtualMachineId?: string;
+  SnapshotId?: string;
 
   /**
-   * Describes the lifecycle state of the SVM being deleted.
+   * The lifecycle status of the snapshot. If the DeleteSnapshot operation is successful, this
+   * status is DELETING.
    */
-  Lifecycle?: StorageVirtualMachineLifecycle | string;
+  Lifecycle?: SnapshotLifecycle | string;
 }
 
-export namespace DeleteStorageVirtualMachineResponse {
+export namespace DeleteSnapshotResponse {
   /** @internal */
-  export const filterSensitiveLog = (obj: DeleteStorageVirtualMachineResponse): any => ({ ...obj });
+  export const filterSensitiveLog = (obj: DeleteSnapshotResponse): any => ({ ...obj });
 }
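// Illustrative only (not part of the generated patch): a sketch of deleting a snapshot and
// handling the SnapshotNotFound error modeled below. Assumes the FSxClient and
// DeleteSnapshotCommand exports that normally accompany these models; the error is matched by
// its name property as with other generated exceptions.
import { FSxClient, DeleteSnapshotCommand } from "@aws-sdk/client-fsx";

const snapshotClient = new FSxClient({});

async function deleteSnapshot(snapshotId: string): Promise<void> {
  try {
    const { Lifecycle } = await snapshotClient.send(
      new DeleteSnapshotCommand({ SnapshotId: snapshotId })
    );
    console.log(`Snapshot ${snapshotId} is now ${Lifecycle}`); // DELETING on success
  } catch (error) {
    if ((error as { name?: string }).name === "SnapshotNotFound") {
      console.warn(`No snapshot found for ${snapshotId}`);
      return;
    }
    throw error;
  }
}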

 /**
-  * Use to specify skipping a final backup, or to add tags to a final backup.
+  * No Amazon FSx snapshots were found based on the supplied parameters.
   */
-export interface DeleteVolumeOntapConfiguration {
-  /**
-   * Set to true if you want to skip taking a final backup of the volume you are deleting.
-   */
-  SkipFinalBackup?: boolean;
-
+export interface SnapshotNotFound extends __SmithyException, $MetadataBearer {
+  name: "SnapshotNotFound";
+  $fault: "client";
   /**
-   * A list of Tag values, with a maximum of 50 elements.
+   * A detailed error message.
    */
-  FinalBackupTags?: Tag[];
+  Message?: string;
 }
 
-export namespace DeleteVolumeOntapConfiguration {
+export namespace SnapshotNotFound {
   /** @internal */
-  export const filterSensitiveLog = (obj: DeleteVolumeOntapConfiguration): any => ({ ...obj });
+  export const filterSensitiveLog = (obj: SnapshotNotFound): any => ({ ...obj });
 }
 
-export interface DeleteVolumeRequest {
+export interface DeleteStorageVirtualMachineRequest {
   /**
    * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII
    * characters. This token is automatically filled on your behalf when you use the
    * Command Line Interface (CLI) or an Amazon Web Services SDK.
    */
@@ -4184,33 +5602,133 @@
   ClientRequestToken?: string;
 
   /**
-   * The ID of the volume you are deleting.
-   */
-  VolumeId: string | undefined;
-
-  /**
-   * For Amazon FSx for ONTAP volumes, specify whether to take a final backup of the volume, and
-   * apply tags to the backup.
+   * The ID of the SVM that you want to delete.
    */
-  OntapConfiguration?: DeleteVolumeOntapConfiguration;
+  StorageVirtualMachineId: string | undefined;
 }
 
-export namespace DeleteVolumeRequest {
+export namespace DeleteStorageVirtualMachineRequest {
   /** @internal */
-  export const filterSensitiveLog = (obj: DeleteVolumeRequest): any => ({ ...obj });
+  export const filterSensitiveLog = (obj: DeleteStorageVirtualMachineRequest): any => ({ ...obj });
 }
 
-/**
- * The response object for the Amazon FSx for NetApp ONTAP volume being deleted
- * in the DeleteVolume operation.
- */
+export interface DeleteStorageVirtualMachineResponse {
+  /**
+   * The ID of the SVM Amazon FSx is deleting.
+   */
+  StorageVirtualMachineId?: string;
+
+  /**
+   * Describes the lifecycle state of the SVM being deleted.
+   */
+  Lifecycle?: StorageVirtualMachineLifecycle | string;
+}
+
+export namespace DeleteStorageVirtualMachineResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteStorageVirtualMachineResponse): any => ({ ...obj });
+}
+
+/**
+ * Use to specify skipping a final backup, or to add tags to a final backup.
+ */
+export interface DeleteVolumeOntapConfiguration {
+  /**
+   * Set to true if you want to skip taking a final backup of the volume you are deleting.
+   */
+  SkipFinalBackup?: boolean;
+
+  /**
+   * A list of Tag values, with a maximum of 50 elements.
+   */
+  FinalBackupTags?: Tag[];
+}
+
+export namespace DeleteVolumeOntapConfiguration {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteVolumeOntapConfiguration): any => ({ ...obj });
+}
+
+export enum DeleteOpenZFSVolumeOption {
+  DELETE_CHILD_VOLUMES_AND_SNAPSHOTS = "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS",
+}
+
+/**
+ * A value that specifies whether to delete all child volumes and snapshots.
+ */
+export interface DeleteVolumeOpenZFSConfiguration {
+  /**
+   * To delete the volume's children and snapshots, use the string
+   * DELETE_CHILD_VOLUMES_AND_SNAPSHOTS.
+   */
+  Options?: (DeleteOpenZFSVolumeOption | string)[];
+}
+
+export namespace DeleteVolumeOpenZFSConfiguration {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteVolumeOpenZFSConfiguration): any => ({ ...obj });
+}
+
+export interface DeleteVolumeRequest {
+  /**
+   * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII
+   * characters. This token is automatically filled on your behalf when you use the
+   * Command Line Interface (CLI) or an Amazon Web Services SDK.
+   */
+  ClientRequestToken?: string;
+
+  /**
+   * The ID of the volume that you are deleting.
+   */
+  VolumeId: string | undefined;
+
+  /**
+   * For Amazon FSx for ONTAP volumes, specify whether to take a final backup of the volume and
+   * apply tags to the backup. To apply tags to the backup, you must have the fsx:TagResource
+   * permission.
+   */
+  OntapConfiguration?: DeleteVolumeOntapConfiguration;
+
+  /**
+   * For Amazon FSx for OpenZFS volumes, specify whether to delete all child volumes and
+   * snapshots.
+   */
+  OpenZFSConfiguration?: DeleteVolumeOpenZFSConfiguration;
+}
+
+export namespace DeleteVolumeRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteVolumeRequest): any => ({ ...obj });
+}
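// Illustrative only (not part of the generated patch): building a DeleteVolumeRequest that
// removes an OpenZFS volume together with its child volumes and snapshots, using the enum added
// above. Assumes the model types are re-exported from @aws-sdk/client-fsx as with the other
// generated clients; the volume ID is a placeholder.
import { DeleteOpenZFSVolumeOption, DeleteVolumeRequest } from "@aws-sdk/client-fsx";

const deleteVolumeInput: DeleteVolumeRequest = {
  VolumeId: "fsvol-0123456789abcdef0", // placeholder ID
  OpenZFSConfiguration: {
    // Deletes the volume's children and snapshots along with the volume itself.
    Options: [DeleteOpenZFSVolumeOption.DELETE_CHILD_VOLUMES_AND_SNAPSHOTS],
  },
};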

 /**
  * The response object for the Amazon FSx for NetApp ONTAP volume being deleted in the
  * DeleteVolume operation.
  */
 export interface DeleteVolumeOntapResponse {
   /**
-   * The ID of the source backup. Specifies the backup you are copying.
+   * The ID of the source backup. Specifies the backup that you are copying.
    */
   FinalBackupId?: string;
 
@@ -4231,18 +5749,19 @@ export namespace DeleteVolumeOntapResponse {
 
 export interface DeleteVolumeResponse {
   /**
-   * The ID of the volume being deleted.
+   * The ID of the volume that's being deleted.
    */
   VolumeId?: string;
 
   /**
-   * Describes the lifecycle state of the volume being deleted.
+   * The lifecycle state of the volume being deleted. If the DeleteVolume operation is
+   * successful, this value is DELETING.
    */
   Lifecycle?: VolumeLifecycle | string;
 
   /**
    * Returned after a DeleteVolume request, showing the status of the delete request.
    */
   OntapResponse?: DeleteVolumeOntapResponse;
 }
 
@@ -4258,6 +5777,7 @@ export namespace DeleteVolumeResponse {
 
 export enum FilterName {
   BACKUP_TYPE = "backup-type",
+  DATA_REPOSITORY_TYPE = "data-repository-type",
   FILE_SYSTEM_ID = "file-system-id",
   FILE_SYSTEM_TYPE = "file-system-type",
   VOLUME_ID = "volume-id",
@@ -4290,34 +5810,34 @@ export namespace Filter {
 }
 
 /**
- * The request object for DescribeBackups operation.
+ * The request object for the DescribeBackups operation.
  */
 export interface DescribeBackupsRequest {
   /**
-   * IDs of the backups you want to retrieve (String). This overrides any filters. If any IDs
-   * are not found, BackupNotFound will be thrown.
+   * The IDs of the backups that you want to retrieve. This parameter value overrides any
+   * filters. If any IDs aren't found, a BackupNotFound error occurs.
    */
   BackupIds?: string[];
 
   /**
-   * Filters structure. Supported names are file-system-id, backup-type, file-system-type, and
-   * volume-id.
+   * The filters structure. The supported names are file-system-id, backup-type,
+   * file-system-type, and volume-id.
    */
   Filters?: Filter[];
 
   /**
-   * Maximum number of backups to return in the response (integer). This parameter value must be
-   * greater than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults
-   * parameter specified in the request and the service's internal maximum number of items per
-   * page.
+   * Maximum number of backups to return in the response. This parameter value must be greater
+   * than 0. The number of items that Amazon FSx returns is the minimum of the MaxResults
+   * parameter specified in the request and the service's internal maximum number of items per
+   * page.
    */
   MaxResults?: number;
 
   /**
-   * Opaque pagination token returned from a previous DescribeBackups operation (String). If a
-   * token present, the action continues the list from where the returning call left off.
+   * An opaque pagination token returned from a previous DescribeBackups operation. If a token
+   * is present, the operation continues the list from where the returning call left off.
    */
   NextToken?: string;
 }
 
@@ -4331,7 +5851,87 @@ export namespace DescribeBackupsRequest {
   });
 }
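// Illustrative only (not part of the generated patch): listing backups for one file system and
// following NextToken manually, using the request shape above. Assumes the FSxClient and
// DescribeBackupsCommand exports that normally accompany these models, and a Backups array on
// the response as in the FSx DescribeBackups API; the file system ID is a placeholder.
import { FSxClient, DescribeBackupsCommand } from "@aws-sdk/client-fsx";

const fsxClient = new FSxClient({});

async function listBackupIds(fileSystemId: string): Promise<string[]> {
  const backupIds: string[] = [];
  let nextToken: string | undefined;
  do {
    const page = await fsxClient.send(
      new DescribeBackupsCommand({
        Filters: [{ Name: "file-system-id", Values: [fileSystemId] }],
        MaxResults: 50,
        NextToken: nextToken,
      })
    );
    for (const backup of page.Backups ?? []) {
      if (backup.BackupId) backupIds.push(backup.BackupId);
    }
    nextToken = page.NextToken;
  } while (nextToken);
  return backupIds;
}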

+export interface DescribeDataRepositoryAssociationsRequest {
+  /**
+   * IDs of the data repository associations whose descriptions you want to retrieve (String).
+   */
+  AssociationIds?: string[];
+
+  /**
+   * A list of Filter elements.
+   */
+  Filters?: Filter[];
+
+  /**
+   * The maximum number of resources to return in the response. This value must be an integer
+   * greater than zero.
+   */
+  MaxResults?: number;
+
+  /**
+   * (Optional) Opaque pagination token returned from a previous operation (String). If present,
+   * this token indicates from what point you can continue processing the request, where the
+   * previous NextToken value left off.
+   */
+  NextToken?: string;
+}
+
+export namespace DescribeDataRepositoryAssociationsRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DescribeDataRepositoryAssociationsRequest): any => ({ ...obj });
+}
+
+export interface DescribeDataRepositoryAssociationsResponse {
+  /**
+   * An array of one or more data repository association descriptions.
+   */
+  Associations?: DataRepositoryAssociation[];
+
+  /**
+   * (Optional) Opaque pagination token returned from a previous operation (String). If present,
+   * this token indicates from what point you can continue processing the request, where the
+   * previous NextToken value left off.
+   */
+  NextToken?: string;
+}
+
+export namespace DescribeDataRepositoryAssociationsResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DescribeDataRepositoryAssociationsResponse): any => ({ ...obj });
+}
+
+/**
+ * You have filtered the response to a data repository type that is not supported.
+ */
+export interface InvalidDataRepositoryType extends __SmithyException, $MetadataBearer {
+  name: "InvalidDataRepositoryType";
+  $fault: "client";
+  /**
+   * A detailed error message.
+   */
+  Message?: string;
+}
+
+export namespace InvalidDataRepositoryType {
+  /** @internal */
+  export const filterSensitiveLog = (obj: InvalidDataRepositoryType): any => ({ ...obj });
+}
+
 export enum DataRepositoryTaskFilterName {
+  DATA_REPO_ASSOCIATION_ID = "data-repository-association-id",
   FILE_SYSTEM_ID = "file-system-id",
   TASK_LIFECYCLE = "task-lifecycle",
 }
@@ -4518,9 +6118,9 @@ export interface DescribeFileSystemsRequest {
   MaxResults?: number;
 
   /**
-   * Opaque pagination token returned from a previous DescribeFileSystems operation (String). If
-   * a token present, the action continues the list from where the returning call left off.
+   * Opaque pagination token returned from a previous DescribeFileSystems operation (String). If
+   * a token present, the operation continues the list from where the returning call left off.
    */
   NextToken?: string;
 }
 
@@ -4534,6 +6134,74 @@ export namespace DescribeFileSystemsRequest {
   });
 }
 
+export enum SnapshotFilterName {
+  FILE_SYSTEM_ID = "file-system-id",
+  VOLUME_ID = "volume-id",
+}
+
+/**
+ * A filter used to restrict the results of DescribeSnapshots calls. You can use multiple
+ * filters to return results that meet all applied filter requirements.
+ */
+export interface SnapshotFilter {
+  /**
+   * The name of the filter to use. You can filter by the file-system-id or by volume-id.
+   */
+  Name?: SnapshotFilterName | string;
+
+  /**
+   * The file-system-id or volume-id that you are filtering for.
+   */
+  Values?: string[];
+}
+
+export namespace SnapshotFilter {
+  /** @internal */
+  export const filterSensitiveLog = (obj: SnapshotFilter): any => ({ ...obj });
+}
+
+export interface DescribeSnapshotsRequest {
+  /**
+   * The IDs of the snapshots that you want to retrieve. This parameter value overrides any
+   * filters. If any IDs aren't found, a SnapshotNotFound error occurs.
+   */
+  SnapshotIds?: string[];
+
+  /**
+   * The filters structure. The supported names are file-system-id or volume-id.
+   */
+  Filters?: SnapshotFilter[];
+
+  /**
+   * The maximum number of resources to return in the response. This value must be an integer
+   * greater than zero.
+   */
+  MaxResults?: number;
+
+  /**
+   * (Optional) Opaque pagination token returned from a previous operation (String). If present,
+   * this token indicates from what point you can continue processing the request, where the
+   * previous NextToken value left off.
+   */
+  NextToken?: string;
+}
+
+export namespace DescribeSnapshotsRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DescribeSnapshotsRequest): any => ({ ...obj });
+}
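// Illustrative only (not part of the generated patch): retrieving the snapshots of a single
// volume with the SnapshotFilter shape above. Assumes the FSxClient and DescribeSnapshotsCommand
// exports that normally accompany these models, and a Snapshots array on the response; the
// volume ID passed in would be a real fsvol-* identifier.
import { FSxClient, DescribeSnapshotsCommand } from "@aws-sdk/client-fsx";

const describeClient = new FSxClient({});

async function listVolumeSnapshots(volumeId: string): Promise<void> {
  const response = await describeClient.send(
    new DescribeSnapshotsCommand({
      Filters: [{ Name: "volume-id", Values: [volumeId] }],
      MaxResults: 25,
    })
  );
  console.log(response.Snapshots ?? []);
}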

 export enum StorageVirtualMachineFilterName {
   FILE_SYSTEM_ID = "file-system-id",
 }
@@ -4628,9 +6296,9 @@ export enum VolumeFilterName {
 }
 
 /**
- * A filter used to restrict the results of describe calls for Amazon FSx for NetApp ONTAP
- * volumes. You can use multiple filters to return results that meet all applied filter
- * requirements.
+ * A filter used to restrict the results of describe calls for Amazon FSx for NetApp ONTAP or
+ * Amazon FSx for OpenZFS volumes. You can use multiple filters to return results that meet all
+ * applied filter requirements.
  */
 export interface VolumeFilter {
   /**
@@ -4656,12 +6324,13 @@ export namespace VolumeFilter {
 
 export interface DescribeVolumesRequest {
   /**
-   * IDs of the volumes whose descriptions you want to retrieve.
+   * The IDs of the volumes whose descriptions you want to retrieve.
    */
   VolumeIds?: string[];
 
   /**
-   * Enter a filter name:value pair to view a select set of volumes.
+   * Enter a filter Name and Values pair to view a select set of volumes.
    */
   Filters?: VolumeFilter[];
 
@@ -4688,29 +6357,6 @@ export namespace DescribeVolumesRequest {
   });
 }
 
-export interface DescribeVolumesResponse {
-  /**
-   * Returned after a successful DescribeVolumes operation, describing each volume.
-   */
-  Volumes?: Volume[];
-
-  /**
-   * (Optional) Opaque pagination token returned from a previous operation (String). If present,
-   * this token indicates from what point you can continue processing the request, where the
-   * previous NextToken value left off.
-   */
-  NextToken?: string;
-}
-
-export namespace DescribeVolumesResponse {
-  /** @internal */
-  export const filterSensitiveLog = (obj: DescribeVolumesResponse): any => ({ ...obj });
-}
-
 /**
  * The request object of DNS aliases to disassociate from an Amazon FSx for Windows File Server
  * file system.
  */
@@ -4902,35 +6548,132 @@ export namespace ResourceNotFound {
   });
 }

-/**
- * The request object for the TagResource operation.
- */
-export interface TagResourceRequest {
+export interface ReleaseFileSystemNfsV3LocksRequest {
   /**
-   * The Amazon Resource Name (ARN) of the Amazon FSx resource that you want to tag.
+   * The globally unique ID of the file system, assigned by Amazon FSx.
    */
-  ResourceARN: string | undefined;
+  FileSystemId: string | undefined;
 
   /**
-   * A list of tags for the resource. If a tag with a given key already exists, the value is
-   * replaced by the one specified in this parameter.
+   * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII
+   * characters. This token is automatically filled on your behalf when you use the
+   * Command Line Interface (CLI) or an Amazon Web Services SDK.
    */
-  Tags: Tag[] | undefined;
+  ClientRequestToken?: string;
 }
 
-export namespace TagResourceRequest {
+export namespace ReleaseFileSystemNfsV3LocksRequest {
   /** @internal */
-  export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ ...obj });
+  export const filterSensitiveLog = (obj: ReleaseFileSystemNfsV3LocksRequest): any => ({ ...obj });
 }
 
-/**
- * The response object for the TagResource operation.
- */
+export enum RestoreOpenZFSVolumeOption {
+  DELETE_CLONED_VOLUMES = "DELETE_CLONED_VOLUMES",
+  DELETE_INTERMEDIATE_SNAPSHOTS = "DELETE_INTERMEDIATE_SNAPSHOTS",
+}
+
+export interface RestoreVolumeFromSnapshotRequest {
+  /**
+   * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII
+   * characters. This token is automatically filled on your behalf when you use the
+   * Command Line Interface (CLI) or an Amazon Web Services SDK.
+   */
+  ClientRequestToken?: string;
+
+  /**
+   * The ID of the volume that you are restoring.
+   */
+  VolumeId: string | undefined;
+
+  /**
+   * The ID of the source snapshot. Specifies the snapshot that you are restoring from.
+   */
+  SnapshotId: string | undefined;
+
+  /**
+   * The settings used when restoring the specified volume from snapshot.
+   *   - DELETE_INTERMEDIATE_SNAPSHOTS - Deletes snapshots between the current state and the
+   *     specified snapshot. If there are intermediate snapshots and this option isn't used,
+   *     RestoreVolumeFromSnapshot fails.
+   *   - DELETE_CLONED_VOLUMES - Deletes any volumes cloned from this volume. If there are any
+   *     cloned volumes and this option isn't used, RestoreVolumeFromSnapshot fails.
+   */
+  Options?: (RestoreOpenZFSVolumeOption | string)[];
+}
+
+export namespace RestoreVolumeFromSnapshotRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: RestoreVolumeFromSnapshotRequest): any => ({ ...obj });
+}
+
+export interface RestoreVolumeFromSnapshotResponse {
+  /**
+   * The ID of the volume that you restored.
+   */
+  VolumeId?: string;
+
+  /**
+   * The lifecycle state of the volume being restored.
+   */
+  Lifecycle?: VolumeLifecycle | string;
+}
+
+export namespace RestoreVolumeFromSnapshotResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: RestoreVolumeFromSnapshotResponse): any => ({ ...obj });
+}
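// Illustrative only (not part of the generated patch): rolling an OpenZFS volume back to a
// snapshot, opting in to both clean-up behaviors documented above so the restore doesn't fail on
// intermediate snapshots or clones. Assumes the FSxClient and RestoreVolumeFromSnapshotCommand
// exports that normally accompany these models; the IDs are placeholders.
import { FSxClient, RestoreOpenZFSVolumeOption, RestoreVolumeFromSnapshotCommand } from "@aws-sdk/client-fsx";

const restoreClient = new FSxClient({});

async function rollBackVolume(): Promise<void> {
  const { Lifecycle } = await restoreClient.send(
    new RestoreVolumeFromSnapshotCommand({
      VolumeId: "fsvol-0123456789abcdef0", // placeholder ID
      SnapshotId: "fsvolsnap-0123456789abcdef0", // placeholder ID
      Options: [
        RestoreOpenZFSVolumeOption.DELETE_INTERMEDIATE_SNAPSHOTS,
        RestoreOpenZFSVolumeOption.DELETE_CLONED_VOLUMES,
      ],
    })
  );
  console.log(Lifecycle); // lifecycle state of the volume being restored
}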

+/**
+ * The request object for the TagResource operation.
+ */
+export interface TagResourceRequest {
+  /**
+   * The Amazon Resource Name (ARN) of the Amazon FSx resource that you want to tag.
+   */
+  ResourceARN: string | undefined;
+
+  /**
+   * A list of tags for the resource. If a tag with a given key already exists, the value is
+   * replaced by the one specified in this parameter.
+   */
+  Tags: Tag[] | undefined;
+}
+
+export namespace TagResourceRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ ...obj });
+}
+
+/**
+ * The response object for the TagResource operation.
+ */
 export interface TagResourceResponse {}
 
 export namespace TagResourceResponse {
@@ -4981,6 +6724,66 @@ export namespace UntagResourceResponse {
   });
 }
 
+export interface UpdateDataRepositoryAssociationRequest {
+  /**
+   * The ID of the data repository association that you are updating.
+   */
+  AssociationId: string | undefined;
+
+  /**
+   * (Optional) An idempotency token for resource creation, in a string of up to 64 ASCII
+   * characters. This token is automatically filled on your behalf when you use the
+   * Command Line Interface (CLI) or an Amazon Web Services SDK.
+   */
+  ClientRequestToken?: string;
+
+  /**
+   * For files imported from a data repository, this value determines the stripe count and
+   * maximum amount of data per file (in MiB) stored on a single physical disk. The maximum
+   * number of disks that a single file can be striped across is limited by the total number of
+   * disks that make up the file system.
+   *
+   * The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB).
+   * Amazon S3 objects have a maximum size of 5 TB.
+   */
+  ImportedFileChunkSize?: number;
+
+  /**
+   * The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file
+   * system with a data repository association. The configuration defines which file events
+   * (new, changed, or deleted files or directories) are automatically imported from the linked
+   * data repository to the file system or automatically exported from the file system to the
+   * data repository.
+   */
+  S3?: S3DataRepositoryConfiguration;
+}
+
+export namespace UpdateDataRepositoryAssociationRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: UpdateDataRepositoryAssociationRequest): any => ({ ...obj });
+}
+
+export interface UpdateDataRepositoryAssociationResponse {
+  /**
+   * The response object returned after the data repository association is updated.
+   */
+  Association?: DataRepositoryAssociation;
+}
+
+export namespace UpdateDataRepositoryAssociationResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: UpdateDataRepositoryAssociationResponse): any => ({ ...obj });
+}
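// Illustrative only (not part of the generated patch): updating the striping chunk size of an
// existing data repository association. Assumes the FSxClient and
// UpdateDataRepositoryAssociationCommand exports that normally accompany these models; the
// association ID is a placeholder, and the S3 event configuration is omitted so it stays
// unchanged.
import { FSxClient, UpdateDataRepositoryAssociationCommand } from "@aws-sdk/client-fsx";

const updateClient = new FSxClient({});

async function widenStripes(): Promise<void> {
  const { Association } = await updateClient.send(
    new UpdateDataRepositoryAssociationCommand({
      AssociationId: "dra-0123456789abcdef0", // placeholder ID
      ImportedFileChunkSize: 2048, // MiB of data per file stored on a single physical disk
    })
  );
  console.log(Association);
}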

 /**
  * The configuration object for Amazon FSx for Lustre file systems used in the
  * UpdateFileSystem operation.
  */
@@ -5000,8 +6803,9 @@ export interface UpdateFileSystemLustreConfiguration {
   DailyAutomaticBackupStartTime?: string;
 
   /**
-   * The number of days to retain automatic backups. Setting this to 0 disables automatic
-   * backups. You can retain automatic backups for a maximum of 90 days. The default is 0.
+   * The number of days to retain automatic backups. Setting this property to 0 disables
+   * automatic backups. You can retain automatic backups for a maximum of 90 days. The default
+   * is 0.
    */
   AutomaticBackupRetentionDays?: number;
 
@@ -5028,11 +6832,19 @@ export interface UpdateFileSystemLustreConfiguration {
    *   - NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory
    *     listings of any new objects added to the S3 bucket and any existing objects that are
    *     changed in the S3 bucket after you choose this option.
+   *   - NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and
+   *     directory listings of any new objects added to the S3 bucket, any existing objects that
+   *     are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.
    *
-   * For more information, see Automatically import updates from your S3 bucket.
+   * The AutoImportPolicy parameter is not supported for Lustre file systems with the
+   * Persistent_2 deployment type. Instead, use UpdateDataRepositoryAssociation to update a data
+   * repository association on your Persistent_2 file system.
    */
   AutoImportPolicy?: AutoImportPolicyType | string;
 
@@ -5056,6 +6868,13 @@ export interface UpdateFileSystemLustreConfiguration {
    * For more information, see Lustre data compression.
    */
   DataCompressionType?: DataCompressionType | string;
+
+  /**
+   * The Lustre logging configuration used when updating an Amazon FSx for Lustre file system.
+   * When logging is enabled, Lustre logs error and warning events for data repositories
+   * associated with your file system to Amazon CloudWatch Logs.
+   */
+  LogConfiguration?: LustreLogCreateConfiguration;
 }
 
 export namespace UpdateFileSystemLustreConfiguration {
@@ -5072,8 +6891,9 @@
  */
 export interface UpdateFileSystemOntapConfiguration {
   /**
-   * The number of days to retain automatic backups. Setting this to 0 disables automatic
-   * backups. You can retain automatic backups for a maximum of 90 days. The default is 0.
+   * The number of days to retain automatic backups. Setting this property to 0 disables
+   * automatic backups. You can retain automatic backups for a maximum of 90 days. The default
+   * is 0.
    */
   AutomaticBackupRetentionDays?: number;
 
@@ -5112,6 +6932,81 @@ export namespace UpdateFileSystemOntapConfiguration {
   });
 }

+/**
+ * The configuration updates for an Amazon FSx for OpenZFS file system.
+ */
+export interface UpdateFileSystemOpenZFSConfiguration {
+  /**
+   * The number of days to retain automatic backups. Setting this property to 0 disables
+   * automatic backups. You can retain automatic backups for a maximum of 90 days. The default
+   * is 0.
+   */
+  AutomaticBackupRetentionDays?: number;
+
+  /**
+   * A Boolean value indicating whether tags for the file system should be copied to backups.
+   * This value defaults to false. If it's set to true, all tags for the file system are copied
+   * to all automatic and user-initiated backups where the user doesn't specify tags. If this
+   * value is true and you specify one or more tags, only the specified tags are copied to
+   * backups. If you specify one or more tags when creating a user-initiated backup, no tags are
+   * copied from the file system, regardless of this value.
+   */
+  CopyTagsToBackups?: boolean;
+
+  /**
+   * A Boolean value indicating whether tags for the volume should be copied to snapshots. This
+   * value defaults to false. If it's set to true, all tags for the volume are copied to
+   * snapshots where the user doesn't specify tags. If this value is true and you specify one or
+   * more tags, only the specified tags are copied to snapshots. If you specify one or more tags
+   * when creating the snapshot, no tags are copied from the volume, regardless of this value.
+   */
+  CopyTagsToVolumes?: boolean;
+
+  /**
+   * A recurring daily time, in the format HH:MM. HH is the zero-padded hour of the day (0-23),
+   * and MM is the zero-padded minute of the hour. For example, 05:00 specifies 5 AM daily.
+   */
+  DailyAutomaticBackupStartTime?: string;
+
+  /**
+   * The throughput of an Amazon FSx file system, measured in megabytes per second (MBps), in
+   * 2 to the nth increments, between 2^3 (8) and 2^11 (2048).
+   */
+  ThroughputCapacity?: number;
+
+  /**
+   * A recurring weekly time, in the format D:HH:MM.
+   *
+   * D is the day of the week, for which 1 represents Monday and 7 represents Sunday. For
+   * further details, see the ISO-8601 spec as described on Wikipedia.
+   *
+   * HH is the zero-padded hour of the day (0-23), and MM is the zero-padded minute of the hour.
+   *
+   * For example, 1:05:00 specifies maintenance at 5 AM Monday.
+   */
+  WeeklyMaintenanceStartTime?: string;
+
+  /**
+   * The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for
+   * NetApp ONTAP or Amazon FSx for OpenZFS file system. The default is 3 IOPS per GB of storage
+   * capacity, but you can provision additional IOPS per GB of storage. The configuration
+   * consists of the total number of provisioned SSD IOPS and how the amount was provisioned
+   * (by the customer or by the system).
+   */
+  DiskIopsConfiguration?: DiskIopsConfiguration;
+}
+
+export namespace UpdateFileSystemOpenZFSConfiguration {
+  /** @internal */
+  export const filterSensitiveLog = (obj: UpdateFileSystemOpenZFSConfiguration): any => ({ ...obj });
+}

 /**
  * The configuration that Amazon FSx uses to join the Windows File Server instance to a
  * self-managed Microsoft Active Directory (AD) directory.
  */
@@ -5213,26 +7108,29 @@
  */
 export interface UpdateFileSystemRequest {
   /**
-   * Identifies the file system that you are updating.
+   * The ID of the file system that you are updating.
    */
   FileSystemId: string | undefined;
 
   /**
    * A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates.
    * This string is automatically filled on your behalf when you use the Command Line Interface
    * (CLI) or an Amazon Web Services SDK.
    */
   ClientRequestToken?: string;
 
   /**
-   * Use this parameter to increase the storage capacity of an Amazon FSx for Windows File
-   * Server or Amazon FSx for Lustre file system. Specifies the storage capacity target value,
-   * GiB, to increase the storage capacity for the file system that you're updating. You cannot
-   * make a storage capacity increase request if there is an existing storage capacity increase
-   * request in progress.
+   * Use this parameter to increase the storage capacity of an Amazon FSx for Windows File
+   * Server or Amazon FSx for Lustre file system. Specifies the storage capacity target value,
+   * in GiB, to increase the storage capacity for the file system that you're updating.
+   *
+   * You can't make a storage capacity increase request if there is an existing storage capacity
+   * increase request in progress.
    *
-   * For Windows file systems, the storage capacity target value must be at least 10 percent (%)
-   * greater than the current storage capacity value. In order to increase storage capacity, the
-   * file system must have at least 16 MB/s of throughput capacity.
+   * For Windows file systems, the storage capacity target value must be at least 10 percent
+   * greater than the current storage capacity value. To increase storage capacity, the file
+   * system must have at least 16 MBps of throughput capacity.
    *
    * For Lustre file systems, the storage capacity target value can be the following:
-   *   - For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12 MB/s/TiB
-   *     file systems and multiples of 1800 GiB for 40 MB/s/TiB file systems. The values must be
-   *     greater than the current storage capacity.
-   *   - For SCRATCH_1 file systems, you cannot increase the storage capacity.
+   *   - For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps
+   *     throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per
+   *     TiB file systems. The values must be greater than the current storage capacity.
+   *   - For SCRATCH_1 file systems, you can't increase the storage capacity.
+   *
+   * For OpenZFS file systems, the input/output operations per second (IOPS) automatically scale
+   * with increases to the storage capacity if IOPS is configured for automatic scaling. If the
+   * storage capacity increase would result in less than 3 IOPS per GiB of storage, this
+   * operation returns an error.
    *
-   * For more information, see Managing storage capacity in the Amazon FSx for Windows File
-   * Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre
-   * User Guide.
+   * For more information, see Managing storage capacity in the Amazon FSx for Windows File
+   * Server User Guide, Managing storage and throughput capacity in the Amazon FSx for Lustre
+   * User Guide, and Managing storage capacity in the Amazon FSx for OpenZFS User Guide.
    */
   StorageCapacity?: number;
 
   /**
    * The configuration updates for an Amazon FSx for Windows File Server file system.
    */
   WindowsConfiguration?: UpdateFileSystemWindowsConfiguration;
 
@@ -5270,6 +7175,11 @@
   /**
    * The configuration updates for an Amazon FSx for NetApp ONTAP file system.
    */
   OntapConfiguration?: UpdateFileSystemOntapConfiguration;
+
+  /**
+   * The configuration updates for an Amazon FSx for OpenZFS file system.
+   */
+  OpenZFSConfiguration?: UpdateFileSystemOpenZFSConfiguration;
 }
 
 export namespace UpdateFileSystemRequest {
@@ -5287,9 +7197,38 @@ export namespace UpdateFileSystemRequest {
   });
 }
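// Illustrative only (not part of the generated patch): updating the weekly maintenance window
// and throughput of an FSx for OpenZFS file system through the new OpenZFSConfiguration member.
// Assumes the FSxClient and UpdateFileSystemCommand exports that normally accompany these
// models; the file system ID is a placeholder.
import { FSxClient, UpdateFileSystemCommand } from "@aws-sdk/client-fsx";

const fsxUpdateClient = new FSxClient({});

async function tuneOpenZfsFileSystem(): Promise<void> {
  await fsxUpdateClient.send(
    new UpdateFileSystemCommand({
      FileSystemId: "fs-0123456789abcdef0", // placeholder ID
      OpenZFSConfiguration: {
        ThroughputCapacity: 256, // MBps; must be one of the documented power-of-two values
        WeeklyMaintenanceStartTime: "1:05:00", // 5 AM each Monday
        AutomaticBackupRetentionDays: 7,
      },
    })
  );
}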

                                    (Optional) An idempotency token for resource creation, in a string of up to 64 + * ASCII characters. This token is automatically filled on your behalf when you use the + * Command Line Interface (CLI) or an Amazon Web Services SDK.

                                    + */ + ClientRequestToken?: string; + + /** + *

                                    The name of the snapshot to update.

                                    + */ + Name: string | undefined; + + /** + *

                                    The ID of the snapshot that you want to update, in the format + * fsvolsnap-0123456789abcdef0.

                                    + */ + SnapshotId: string | undefined; +} + +export namespace UpdateSnapshotRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateSnapshotRequest): any => ({ + ...obj, + }); +} + /** *
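// A minimal usage sketch for the UpdateSnapshotRequest shape above, assuming the matching
// UpdateSnapshotCommand added with this API; the snapshot ID and name are placeholders.
import { FSxClient, UpdateSnapshotCommand } from "@aws-sdk/client-fsx";

async function renameSnapshot(): Promise<void> {
  const client = new FSxClient({ region: "us-east-1" });
  const { Snapshot } = await client.send(
    new UpdateSnapshotCommand({
      SnapshotId: "fsvolsnap-0123456789abcdef0", // format documented above
      Name: "nightly-2021-11-30",
      // ClientRequestToken is optional; the SDK supplies one automatically.
    })
  );
  console.log(Snapshot?.Name, Snapshot?.Lifecycle);
}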

                                    Updates the Microsoft Active Directory (AD) configuration of an SVM joined to an AD. - * Pleae note, account credentials are not returned in the response payload.

                                    + * Please note, account credentials are not returned in the response payload.

                                    */ export interface UpdateSvmActiveDirectoryConfiguration { /** @@ -5410,6 +7349,69 @@ export namespace UpdateOntapVolumeConfiguration { }); } +/** + *

                                    Used to specify changes to the OpenZFS configuration for the volume that you are + * updating.

                                    + */ +export interface UpdateOpenZFSVolumeConfiguration { + /** + *

                                    The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't + * reserve more storage than the parent volume has reserved.

                                    + */ + StorageCapacityReservationGiB?: number; + + /** + *

                                    + *

                                    The maximum amount of storage in gibibytes (GiB) that the volume can use from its + * parent. You can specify a quota larger than the storage on the parent volume.

                                    + */ + StorageCapacityQuotaGiB?: number; + + /** + *

                                    + *

                                    Specifies the method used to compress the data on the volume. Unless the compression + * type is specified, volumes inherit the DataCompressionType value of their + * parent volume.

                                    + *
                                      + *
                                    • + *

                                      + * NONE - Doesn't compress the data on the volume.

                                      + *
                                    • + *
                                    • + *

                                      + * ZSTD - Compresses the data in the volume using the Zstandard + * (ZSTD) compression algorithm. This algorithm reduces the amount of space used on + * your volume and has very little impact on compute resources.

                                      + *
                                    • + *
                                    + */ + DataCompressionType?: OpenZFSDataCompressionType | string; + + /** + *

                                    The configuration object for mounting a Network File System (NFS) file system.

                                    + */ + NfsExports?: OpenZFSNfsExport[]; + + /** + *

                                    An object specifying how much storage users or groups can use on the volume.

                                    + */ + UserAndGroupQuotas?: OpenZFSUserOrGroupQuota[]; + + /** + *

                                    A Boolean value indicating whether the volume is read-only.

                                    + */ + ReadOnly?: boolean; +} + +export namespace UpdateOpenZFSVolumeConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateOpenZFSVolumeConfiguration): any => ({ + ...obj, + }); +} + export interface UpdateVolumeRequest { /** *

                                    (Optional) An idempotency token for resource creation, in a string of up to 64 @@ -5419,44 +7421,41 @@ export interface UpdateVolumeRequest { ClientRequestToken?: string; /** - *

                                    Specifies the volume that you want to update, formatted fsvol-0123456789abcdef0.

                                    + *

                                    The ID of the volume that you want to update, in the format + * fsvol-0123456789abcdef0.

                                    */ VolumeId: string | undefined; /** - *

                                    The ONTAP configuration of the volume you are updating.

                                    + *

                                    The configuration of the ONTAP volume that you are updating.

                                    */ OntapConfiguration?: UpdateOntapVolumeConfiguration; -} -export namespace UpdateVolumeRequest { /** - * @internal + *

                                    The name of the OpenZFS volume. OpenZFS root volumes are automatically named + * FSX. Child volume names must be unique among their parent volume's + * children. The name of the volume is part of the mount string for the OpenZFS volume.

                                    */ - export const filterSensitiveLog = (obj: UpdateVolumeRequest): any => ({ - ...obj, - }); -} + Name?: string; -export interface UpdateVolumeResponse { /** - *

                                    Returned after a successful UpdateVolume API operation, describing the volume just updated.

                                    + *

                                    The configuration of the OpenZFS volume that you are updating.

                                    */ - Volume?: Volume; + OpenZFSConfiguration?: UpdateOpenZFSVolumeConfiguration; } -export namespace UpdateVolumeResponse { +export namespace UpdateVolumeRequest { /** * @internal */ - export const filterSensitiveLog = (obj: UpdateVolumeResponse): any => ({ + export const filterSensitiveLog = (obj: UpdateVolumeRequest): any => ({ ...obj, }); } /** - *
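// A minimal sketch of the revised UpdateVolumeRequest, using the new OpenZFSConfiguration
// member defined above with the existing UpdateVolumeCommand; the volume ID and values
// are placeholders.
import { FSxClient, UpdateVolumeCommand } from "@aws-sdk/client-fsx";

async function updateOpenZfsVolume(): Promise<void> {
  const client = new FSxClient({ region: "us-east-1" });
  const { Volume } = await client.send(
    new UpdateVolumeCommand({
      VolumeId: "fsvol-0123456789abcdef0",
      OpenZFSConfiguration: {
        StorageCapacityQuotaGiB: 200, // cap how much the volume can use from its parent
        DataCompressionType: "ZSTD", // one of the values listed above
        ReadOnly: true,
      },
    })
  );
  console.log(Volume?.Lifecycle);
}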

                                    Describes a specific Amazon FSx administrative action for the current Windows or - * Lustre file system.

                                    + *

                                    Describes a specific Amazon FSx administrative action for the current Windows, + * Lustre, or OpenZFS file system.

                                    */ export interface AdministrativeAction { /** @@ -5464,15 +7463,16 @@ export interface AdministrativeAction { *
                                      *
                                    • *

                                      - * FILE_SYSTEM_UPDATE - A file system update administrative action initiated by the user from the - * Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system).

                                      + * FILE_SYSTEM_UPDATE - A file system update administrative action + * initiated from the Amazon FSx console, API + * (UpdateFileSystem), or CLI + * (update-file-system).

                                      *
                                    • *
                                    • *

                                      - * STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE - * task to increase a file system's storage capacity completes successfully, a - * STORAGE_OPTIMIZATION task starts. - *

                                      + * STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE + * task to increase a file system's storage capacity has been completed + * successfully, a STORAGE_OPTIMIZATION task starts.

                                      *
                                        *
                                      • *

                                        For Windows, storage optimization is the process of migrating the file system data @@ -5482,40 +7482,63 @@ export interface AdministrativeAction { *

                                        For Lustre, storage optimization consists of rebalancing the data across the existing and * newly added file servers.

                                        *
                                      • + *
                                      • + *

                                        For OpenZFS, storage optimization consists of migrating data from the + * older smaller disks to the newer larger disks.

                                        + *
                                      • *
                                      - *

                                      You can track the storage optimization progress using the + *

                                      You can track the storage-optimization progress using the * ProgressPercent property. When - * STORAGE_OPTIMIZATION completes successfully, the parent - * FILE_SYSTEM_UPDATE action status changes to + * STORAGE_OPTIMIZATION has been completed successfully, the + * parent FILE_SYSTEM_UPDATE action status changes to * COMPLETED. For more information, see Managing - * storage capacity in the Amazon FSx for Windows File Server - * User Guide and Managing storage - * and throughput capacity in the Amazon FSx for Lustre User - * Guide.

                                      + * storage capacity in the Amazon FSx for Windows + * File Server User Guide, Managing storage + * and throughput capacity in the Amazon FSx for + * Lustre User Guide, and Managing storage capacity in the + * Amazon FSx for OpenZFS User Guide.

                                      *
                                    • *
                                    • *

                                      - * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new DNS alias with the file system. - * For more information, see - * .

                                      + * FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new Domain + * Name System (DNS) alias with the file system. For more information, see + * AssociateFileSystemAliases.

                                      *
                                    • *
                                    • *

                                      * FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system. - * For more information, see .

                                      + * For more information, see DisassociateFileSystemAliases.

                                      + *
                                    • + *
                                    • + *

                                      + * VOLUME_UPDATE - A volume update to an Amazon FSx for NetApp ONTAP or + * Amazon FSx for OpenZFS volume initiated from the Amazon FSx + * console, API (UpdateVolume), or CLI + * (update-volume).

                                      + *
                                    • + *
                                    • + *

                                      + * SNAPSHOT_UPDATE - A snapshot update to an Amazon FSx for + * OpenZFS volume initiated from the Amazon FSx console, API + * (UpdateSnapshot), or CLI (update-snapshot).

                                      + *
                                    • + *
                                    • + *

                                      + * RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System + * (NFS) V3 locks on an Amazon FSx for OpenZFS file system.

                                      *
                                    • *
                                    */ AdministrativeActionType?: AdministrativeActionType | string; /** - *

                                    Provides the percent complete of a STORAGE_OPTIMIZATION administrative action. - * Does not apply to any other administrative action type.

                                    + *

                                    The percentage-complete status of a STORAGE_OPTIMIZATION administrative + * action. Does not apply to any other administrative action type.

                                    */ ProgressPercent?: number; /** - *

                                    Time that the administrative action request was received.

                                    + *

                                    The time that the administrative action request was received.

                                    */ RequestTime?: Date; @@ -5524,7 +7547,8 @@ export interface AdministrativeAction { *
                                      *
                                    • *

                                      - * FAILED - Amazon FSx failed to process the administrative action successfully.

                                      + * FAILED - Amazon FSx failed to process the administrative action + * successfully.

                                      *
                                    • *
                                    • *

                                      @@ -5532,22 +7556,19 @@ export interface AdministrativeAction { *

                                    • *
                                    • *

                                      - * PENDING - Amazon FSx is waiting to process the administrative action.

                                      + * PENDING - Amazon FSx is waiting to process the administrative + * action.

                                      *
                                    • *
                                    • *

                                      - * COMPLETED - Amazon FSx has finished processing the administrative task.

                                      + * COMPLETED - Amazon FSx has finished processing the administrative + * task.

                                      *
                                    • *
                                    • *

                                      - * UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon FSx has - * updated the file system with the new storage capacity, and is now performing the - * storage optimization process. For more information, see - * Managing - * storage capacity in the Amazon FSx for Windows File Server - * User Guide and Managing storage - * and throughput capacity in the Amazon FSx for Lustre User - * Guide.

                                      + * UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx + * has updated the file system with the new storage capacity, and is now performing + * the storage-optimization process.

                                      *
                                    • *
                                    */ @@ -5559,205 +7580,642 @@ export interface AdministrativeAction { * Returned for FILE_SYSTEM_UPDATE administrative actions. *

                                    */ - TargetFileSystemValues?: FileSystem; + TargetFileSystemValues?: FileSystem; + + /** + *

                                    Provides information about a failed administrative action.

                                    + */ + FailureDetails?: AdministrativeActionFailureDetails; + + /** + *

                                    Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS + * volume.

                                    + */ + TargetVolumeValues?: Volume; + + /** + *

                                    A snapshot of an Amazon FSx for OpenZFS volume.

                                    + */ + TargetSnapshotValues?: Snapshot; +} + +export namespace AdministrativeAction { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AdministrativeAction): any => ({ + ...obj, + }); +} + +/** + *
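// A minimal sketch of tracking the storage-optimization progress described above by
// reading AdministrativeActions from the existing DescribeFileSystemsCommand; the
// Region is a placeholder.
import { FSxClient, DescribeFileSystemsCommand } from "@aws-sdk/client-fsx";

async function reportStorageOptimization(fileSystemId: string): Promise<void> {
  const client = new FSxClient({ region: "us-east-1" });
  const { FileSystems } = await client.send(
    new DescribeFileSystemsCommand({ FileSystemIds: [fileSystemId] })
  );
  for (const action of FileSystems?.[0]?.AdministrativeActions ?? []) {
    if (action.AdministrativeActionType === "STORAGE_OPTIMIZATION") {
      // ProgressPercent tracks the optimization; Status reaches COMPLETED when done.
      console.log(`storage optimization: ${action.ProgressPercent ?? 0}% (${action.Status})`);
    }
  }
}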

                                    A description of a specific Amazon FSx file system.

                                    + */ +export interface FileSystem { + /** + *

                                    The Amazon Web Services account that created the file system. If the file system was created by an + * Identity and Access Management (IAM) user, the Amazon Web Services account to which the + * IAM user belongs is the owner.

                                    + */ + OwnerId?: string; + + /** + *

                                    The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), + * also known as Unix time.

                                    + */ + CreationTime?: Date; + + /** + *

                                    The system-generated, unique 17-digit ID of the file system.

                                    + */ + FileSystemId?: string; + + /** + *

                                    The type of Amazon FSx file system, which can be LUSTRE, + * WINDOWS, ONTAP, or OPENZFS.

                                    + */ + FileSystemType?: FileSystemType | string; + + /** + *

                                    The lifecycle status of the file system. The following are the possible values and + * what they mean:

                                    + *
                                      + *
                                    • + *

                                      + * AVAILABLE - The file system is in a healthy state, and is reachable and available for use.

                                      + *
                                    • + *
                                    • + *

                                      + * CREATING - Amazon FSx is creating the new file system.

                                      + *
                                    • + *
                                    • + *

                                      + * DELETING - Amazon FSx is deleting an existing file system.

                                      + *
                                    • + *
                                    • + *

                                      + * FAILED - An existing file system has experienced an unrecoverable failure. + * When creating a new file system, Amazon FSx was unable to create the file system.

                                      + *
                                    • + *
                                    • + *

                                      + * MISCONFIGURED - The file system is in a failed but recoverable state.

                                      + *
                                    • + *
                                    • + *

                                      + * UPDATING - The file system is undergoing a customer-initiated update.

                                      + *
                                    • + *
                                    + */ + Lifecycle?: FileSystemLifecycle | string; + + /** + *

                                    A structure providing details of any failures that occurred when creating a file + * system.

                                    + */ + FailureDetails?: FileSystemFailureDetails; + + /** + *

                                    The storage capacity of the file system in gibibytes (GiB).

                                    + */ + StorageCapacity?: number; + + /** + *

                                    The type of storage the file system is using. + * If set to SSD, the file system uses solid state drive storage. + * If set to HDD, the file system uses hard disk drive storage. + *

                                    + */ + StorageType?: StorageType | string; + + /** + *

                                    The ID of the primary virtual private cloud (VPC) for the file system.

                                    + */ + VpcId?: string; + + /** + *

Specifies the IDs of the subnets that the file system is accessible from. For the Amazon FSx Windows and + * ONTAP MULTI_AZ_1 file system deployment type, there are two subnet IDs, one for + * the preferred file server and one for the standby file server. The preferred file server subnet is + * identified in the PreferredSubnetID property. All other file systems have only one subnet ID.

                                    + *

                                    For FSx for Lustre file systems, and Single-AZ Windows file systems, this is the ID of + * the subnet that contains the file system's endpoint. For MULTI_AZ_1 Windows and + * ONTAP file systems, the file system endpoint is available in the PreferredSubnetID.

                                    + */ + SubnetIds?: string[]; + + /** + *

                                    The IDs of the elastic network interfaces from which a specific file system is + * accessible. The elastic network interface is automatically created in the same virtual + * private cloud (VPC) that the Amazon FSx file system was created in. For more + * information, see Elastic Network Interfaces in + * the Amazon EC2 User Guide. + *

                                    + * + *

                                    For an Amazon FSx for Windows File Server file system, you can have one + * network interface ID. For an Amazon FSx for Lustre file system, you can have + * more than one.

                                    + */ + NetworkInterfaceIds?: string[]; + + /** + *

                                    The Domain Name System (DNS) name for the file system.

                                    + */ + DNSName?: string; + + /** + *

The ID of the Key Management Service (KMS) key used to encrypt the file + * system's data for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and PERSISTENT + * Amazon FSx for Lustre file systems at rest. If this ID isn't specified, the + * Amazon FSx-managed key for your account is used. Scratch Amazon FSx for Lustre file systems are always encrypted at rest using the Amazon + * FSx-managed key for your account. For more information, see Encrypt in the + * Key Management Service API Reference.

                                    + */ + KmsKeyId?: string; + + /** + *

                                    The Amazon Resource Name (ARN) for the file system resource.

                                    + */ + ResourceARN?: string; + + /** + *

                                    The tags to associate with the file system. For more information, see Tagging your + * Amazon EC2 resources in the Amazon EC2 User + * Guide.

                                    + */ + Tags?: Tag[]; + + /** + *

                                    The configuration for this FSx for Windows File Server file system.

                                    + */ + WindowsConfiguration?: WindowsFileSystemConfiguration; + + /** + *

                                    The configuration for the Amazon FSx for Lustre file system.

                                    + */ + LustreConfiguration?: LustreFileSystemConfiguration; + + /** + *

                                    A list of administrative actions for the file system that are in process or waiting to + * be processed. Administrative actions describe changes to the Amazon FSx system + * that you have initiated using the UpdateFileSystem operation.

                                    + */ + AdministrativeActions?: AdministrativeAction[]; + + /** + *

                                    The configuration for this FSx for ONTAP file system.

                                    + */ + OntapConfiguration?: OntapFileSystemConfiguration; + + /** + *

The Lustre version of the Amazon FSx for Lustre file system, either + * 2.10 or 2.12.

                                    + */ + FileSystemTypeVersion?: string; + + /** + *

                                    The configuration for this Amazon FSx for OpenZFS file system.

                                    + */ + OpenZFSConfiguration?: OpenZFSFileSystemConfiguration; +} + +export namespace FileSystem { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FileSystem): any => ({ + ...obj, + }); +} + +/** + *

                                    A snapshot of an Amazon FSx for OpenZFS volume.

                                    + */ +export interface Snapshot { + /** + *

                                    The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services + * resources. We require an ARN when you need to specify a resource unambiguously across + * all of Amazon Web Services. For more information, see Amazon Resource Names (ARNs) in + * the Amazon Web Services General Reference.

                                    + */ + ResourceARN?: string; + + /** + *

                                    The ID of the snapshot.

                                    + */ + SnapshotId?: string; + + /** + *

                                    The name of the snapshot.

                                    + */ + Name?: string; + + /** + *

                                    The ID of the volume that the snapshot is of.

                                    + */ + VolumeId?: string; + + /** + *

                                    The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), + * also known as Unix time.

                                    + */ + CreationTime?: Date; + + /** + *

                                    The lifecycle status of the snapshot.

                                    + *
                                      + *
                                    • + *

                                      + * PENDING - Amazon FSx hasn't started creating the + * snapshot.

                                      + *
                                    • + *
                                    • + *

                                      + * CREATING - Amazon FSx is creating the snapshot.

                                      + *
                                    • + *
                                    • + *

                                      + * DELETING - Amazon FSx is deleting the snapshot.

                                      + *
                                    • + *
                                    • + *

                                      + * AVAILABLE - The snapshot is fully available.

                                      + *
                                    • + *
                                    + */ + Lifecycle?: SnapshotLifecycle | string; + + /** + *

                                    A list of Tag values, with a maximum of 50 elements.

                                    + */ + Tags?: Tag[]; + + /** + *

                                    A list of administrative actions for the file system that are in process or waiting to + * be processed. Administrative actions describe changes to the Amazon FSx + * system.

                                    + */ + AdministrativeActions?: AdministrativeAction[]; +} + +export namespace Snapshot { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Snapshot): any => ({ + ...obj, + }); +} + +/** + *
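// A minimal sketch that polls the new Snapshot shape above until it is AVAILABLE, using
// DescribeSnapshotsCommand (added alongside these models); the SnapshotIds filter and
// poll interval are assumptions.
import { FSxClient, DescribeSnapshotsCommand } from "@aws-sdk/client-fsx";

async function waitForSnapshot(snapshotId: string): Promise<void> {
  const client = new FSxClient({ region: "us-east-1" });
  for (;;) {
    const { Snapshots } = await client.send(
      new DescribeSnapshotsCommand({ SnapshotIds: [snapshotId] }) // assumed request member
    );
    const lifecycle = Snapshots?.[0]?.Lifecycle;
    if (lifecycle === "AVAILABLE") return;
    if (lifecycle === "DELETING") throw new Error(`snapshot ${snapshotId} is being deleted`);
    await new Promise((resolve) => setTimeout(resolve, 15_000)); // poll every 15 seconds
  }
}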

                                    Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS + * volume.

                                    + */ +export interface Volume { + /** + *

                                    The time that the resource was created, in seconds (since 1970-01-01T00:00:00Z), + * also known as Unix time.

                                    + */ + CreationTime?: Date; + + /** + *

                                    The globally unique ID of the file system, assigned by Amazon FSx.

                                    + */ + FileSystemId?: string; + + /** + *

                                    The lifecycle status of the volume.

                                    + *
                                      + *
                                    • + *

                                      + * AVAILABLE - The volume is fully available for + * use.

                                      + *
                                    • + *
                                    • + *

                                      + * CREATED - The volume has been created.

                                      + *
                                    • + *
                                    • + *

                                      + * CREATING - Amazon FSx is creating the new volume.

                                      + *
                                    • + *
                                    • + *

                                      + * DELETING - Amazon FSx is deleting an existing + * volume.

                                      + *
                                    • + *
                                    • + *

                                      + * FAILED - Amazon FSx was unable to create the + * volume.

                                      + *
                                    • + *
                                    • + *

                                      + * MISCONFIGURED - The volume is in a failed but recoverable + * state.

                                      + *
                                    • + *
                                    • + *

                                      + * PENDING - Amazon FSx hasn't started creating the + * volume.

                                      + *
                                    • + *
                                    + */ + Lifecycle?: VolumeLifecycle | string; + + /** + *

                                    The name of the volume.

                                    + */ + Name?: string; + + /** + *

                                    The configuration of an Amazon FSx for NetApp ONTAP volume.

                                    + */ + OntapConfiguration?: OntapVolumeConfiguration; + + /** + *

                                    The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services + * resources. We require an ARN when you need to specify a resource unambiguously across + * all of Amazon Web Services. For more information, see Amazon Resource Names (ARNs) in + * the Amazon Web Services General Reference.

                                    + */ + ResourceARN?: string; + + /** + *

                                    A list of Tag values, with a maximum of 50 elements.

                                    + */ + Tags?: Tag[]; + + /** + *

                                    The system-generated, unique ID of the volume.

                                    + */ + VolumeId?: string; + + /** + *

                                    The type of the volume.

                                    + */ + VolumeType?: VolumeType | string; + + /** + *

                                    The reason why the volume lifecycle status changed.

                                    + */ + LifecycleTransitionReason?: LifecycleTransitionReason; + + /** + *

                                    A list of administrative actions for the file system that are in process or waiting to + * be processed. Administrative actions describe changes to the Amazon FSx system + * that you initiated.

                                    + */ + AdministrativeActions?: AdministrativeAction[]; + + /** + *

                                    The configuration of an Amazon FSx for OpenZFS volume.

                                    + */ + OpenZFSConfiguration?: OpenZFSVolumeConfiguration; +} + +export namespace Volume { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Volume): any => ({ + ...obj, + }); +} + +/** + *
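// A small helper sketch that distinguishes ONTAP and OpenZFS volumes using only the
// members of the Volume shape defined above.
import type { Volume } from "@aws-sdk/client-fsx";

function describeVolumeKind(volume: Volume): string {
  if (volume.OpenZFSConfiguration) {
    return `OpenZFS volume ${volume.Name ?? volume.VolumeId} (${volume.Lifecycle})`;
  }
  if (volume.OntapConfiguration) {
    return `ONTAP volume ${volume.Name ?? volume.VolumeId} (${volume.Lifecycle})`;
  }
  return `volume ${volume.VolumeId} of type ${volume.VolumeType}`;
}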

                                    The response object for the CreateFileSystemFromBackup + * operation.

                                    + */ +export interface CreateFileSystemFromBackupResponse { + /** + *

                                    A description of the file system.

                                    + */ + FileSystem?: FileSystem; +} + +export namespace CreateFileSystemFromBackupResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateFileSystemFromBackupResponse): any => ({ + ...obj, + }); +} + +/** + *

                                    The response object returned after the file system is created.

                                    + */ +export interface CreateFileSystemResponse { + /** + *

                                    The configuration of the file system that was created.

                                    + */ + FileSystem?: FileSystem; +} +export namespace CreateFileSystemResponse { /** - *

                                    Provides information about a failed administrative action.

                                    + * @internal */ - FailureDetails?: AdministrativeActionFailureDetails; + export const filterSensitiveLog = (obj: CreateFileSystemResponse): any => ({ + ...obj, + }); +} +export interface CreateSnapshotResponse { /** - *

                                    Describes an Amazon FSx for NetApp ONTAP volume.

                                    + *

                                    A description of the snapshot.

                                    */ - TargetVolumeValues?: Volume; + Snapshot?: Snapshot; } -export namespace AdministrativeAction { +export namespace CreateSnapshotResponse { /** * @internal */ - export const filterSensitiveLog = (obj: AdministrativeAction): any => ({ + export const filterSensitiveLog = (obj: CreateSnapshotResponse): any => ({ ...obj, }); } -/** - *

                                    A description of a specific Amazon FSx file system.

                                    - */ -export interface FileSystem { +export interface CreateVolumeFromBackupResponse { /** - *

                                    The Amazon Web Services account that created the file system. If the file system was created by an - * Identity and Access Management (IAM) user, the Amazon Web Services account to which the - * IAM user belongs is the owner.

                                    + *

                                    Returned after a successful CreateVolumeFromBackup API operation, + * describing the volume just created.

                                    */ - OwnerId?: string; + Volume?: Volume; +} +export namespace CreateVolumeFromBackupResponse { /** - *

                                    The time that the file system was created, in seconds (since 1970-01-01T00:00:00Z), - * also known as Unix time.

                                    + * @internal */ - CreationTime?: Date; + export const filterSensitiveLog = (obj: CreateVolumeFromBackupResponse): any => ({ + ...obj, + }); +} +export interface CreateVolumeResponse { /** - *

                                    The system-generated, unique 17-digit ID of the file system.

                                    + *

                                    Returned after a successful CreateVolume API operation, describing the volume just created.

                                    */ - FileSystemId?: string; + Volume?: Volume; +} +export namespace CreateVolumeResponse { /** - *

                                    The type of Amazon FSx file system, which can be LUSTRE, WINDOWS, - * or ONTAP.

                                    + * @internal */ - FileSystemType?: FileSystemType | string; + export const filterSensitiveLog = (obj: CreateVolumeResponse): any => ({ + ...obj, + }); +} +export interface ReleaseFileSystemNfsV3LocksResponse { /** - *

                                    The lifecycle status of the file system, following are the possible values and what they mean:

                                    - *
                                      - *
                                    • - *

                                      - * AVAILABLE - The file system is in a healthy state, and is reachable and available for use.

                                      - *
                                    • - *
                                    • - *

                                      - * CREATING - Amazon FSx is creating the new file system.

                                      - *
                                    • - *
                                    • - *

                                      - * DELETING - Amazon FSx is deleting an existing file system.

                                      - *
                                    • - *
                                    • - *

                                      - * FAILED - An existing file system has experienced an unrecoverable failure. - * When creating a new file system, Amazon FSx was unable to create the file system.

                                      - *
                                    • - *
                                    • - *

                                      - * MISCONFIGURED indicates that the file system is in a failed but recoverable state.

                                      - *
                                    • - *
                                    • - *

                                      - * UPDATING indicates that the file system is undergoing a customer initiated update.

                                      - *
                                    • - *
                                    + *

                                    A description of a specific Amazon FSx file system.

                                    */ - Lifecycle?: FileSystemLifecycle | string; + FileSystem?: FileSystem; +} +export namespace ReleaseFileSystemNfsV3LocksResponse { /** - *

                                    A structure providing details of any failures that occur when creating the file system - * has failed.

                                    + * @internal */ - FailureDetails?: FileSystemFailureDetails; + export const filterSensitiveLog = (obj: ReleaseFileSystemNfsV3LocksResponse): any => ({ + ...obj, + }); +} +/** + *

                                    The response object for the UpdateFileSystem operation.

                                    + */ +export interface UpdateFileSystemResponse { /** - *

                                    The storage capacity of the file system in gibibytes (GiB).

                                    + *

                                    A description of the file system that was updated.

                                    */ - StorageCapacity?: number; + FileSystem?: FileSystem; +} +export namespace UpdateFileSystemResponse { /** - *

                                    The storage type of the file system. - * Valid values are SSD and HDD. - * If set to SSD, the file system uses solid state drive storage. - * If set to HDD, the file system uses hard disk drive storage. - *

                                    + * @internal */ - StorageType?: StorageType | string; + export const filterSensitiveLog = (obj: UpdateFileSystemResponse): any => ({ + ...obj, + }); +} +export interface UpdateSnapshotResponse { /** - *

                                    The ID of the primary VPC for the file system.

                                    + *

                                    Returned after a successful UpdateSnapshot operation, describing the + * snapshot that you updated.

                                    */ - VpcId?: string; + Snapshot?: Snapshot; +} +export namespace UpdateSnapshotResponse { /** - *

                                    Specifies the IDs of the subnets that the file system is accessible from. For Windows and - * ONTAP MULTI_AZ_1 file system deployment type, there are two subnet IDs, one for - * the preferred file server and one for the standby file server. The preferred file server subnet - * identified in the PreferredSubnetID property. All other file systems have only one subnet ID.

                                    - *

                                    For Lustre file systems, and Single-AZ Windows file systems, this is the ID of - * the subnet that contains the endpoint for the file system. For MULTI_AZ_1 Windows and - * ONTAP file systems, the endpoint for the file system is available in the PreferredSubnetID.

                                    + * @internal */ - SubnetIds?: string[]; + export const filterSensitiveLog = (obj: UpdateSnapshotResponse): any => ({ + ...obj, + }); +} +export interface UpdateVolumeResponse { /** - *

                                    The IDs of the elastic network interface from which a specific file system is - * accessible. The elastic network interface is automatically created in the same VPC that - * the Amazon FSx file system was created in. For more information, see Elastic Network - * Interfaces in the Amazon EC2 User Guide. - *

                                    - * - *

                                    For an Amazon FSx for Windows File Server file system, you can have one network - * interface ID. For an Amazon FSx for Lustre file system, you can have more than - * one.

                                    + *

                                    A description of the volume just updated. Returned after a successful + * UpdateVolume API operation.

                                    */ - NetworkInterfaceIds?: string[]; + Volume?: Volume; +} +export namespace UpdateVolumeResponse { /** - *

                                    The DNS name for the file system.

                                    + * @internal */ - DNSName?: string; + export const filterSensitiveLog = (obj: UpdateVolumeResponse): any => ({ + ...obj, + }); +} +/** + *

The response object for the DescribeFileSystems operation.

                                    + */ +export interface DescribeFileSystemsResponse { /** - *

                                    The ID of the Key Management Service (KMS) key used to encrypt the file system's data - * for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and - * persistent Amazon FSx for Lustre file systems at rest. If not specified, the Amazon FSx - * managed key is used. The scratch Amazon FSx for Lustre file systems are always encrypted at rest using - * Amazon FSx managed keys. For more information, see Encrypt - * in the Key Management Service API Reference.

                                    + *

                                    An array of file system descriptions.

                                    */ - KmsKeyId?: string; + FileSystems?: FileSystem[]; /** - *

                                    The Amazon Resource Name (ARN) for the file system resource.

                                    + *

Present if there are more file systems than returned in the response (String). You + * can use the NextToken value in a later request to fetch the + * descriptions.

                                    */ - ResourceARN?: string; + NextToken?: string; +} +export namespace DescribeFileSystemsResponse { /** - *

                                    The tags to associate with the file system. For more information, see Tagging Your - * Amazon EC2 Resources in the Amazon EC2 User - * Guide.

                                    + * @internal */ - Tags?: Tag[]; + export const filterSensitiveLog = (obj: DescribeFileSystemsResponse): any => ({ + ...obj, + }); +} +export interface DescribeSnapshotsResponse { /** - *

                                    The configuration for this Microsoft Windows file system.

                                    + *

                                    An array of snapshots.

                                    */ - WindowsConfiguration?: WindowsFileSystemConfiguration; + Snapshots?: Snapshot[]; /** - *

                                    The configuration for the Amazon FSx for Lustre file system.

                                    + *

                                    (Optional) Opaque pagination token returned from a previous operation (String). If + * present, this token indicates from what point you can continue processing the request, where + * the previous NextToken value left off.

                                    */ - LustreConfiguration?: LustreFileSystemConfiguration; + NextToken?: string; +} +export namespace DescribeSnapshotsResponse { /** - *

                                    A list of administrative actions for the file system that are in process or waiting to be processed. - * Administrative actions describe changes to the Amazon FSx file system that you have initiated using - * the UpdateFileSystem action.

                                    + * @internal */ - AdministrativeActions?: AdministrativeAction[]; + export const filterSensitiveLog = (obj: DescribeSnapshotsResponse): any => ({ + ...obj, + }); +} +export interface DescribeVolumesResponse { /** - *

                                    The configuration for this FSx for NetApp ONTAP file system.

                                    + *

                                    Returned after a successful DescribeVolumes operation, describing each volume.

                                    */ - OntapConfiguration?: OntapFileSystemConfiguration; + Volumes?: Volume[]; /** - *

                                    The version of your Amazon FSx for Lustre file system, either - * 2.10 or 2.12.

                                    + *

                                    (Optional) Opaque pagination token returned from a previous operation (String). If + * present, this token indicates from what point you can continue processing the request, where + * the previous NextToken value left off.

                                    */ - FileSystemTypeVersion?: string; + NextToken?: string; } -export namespace FileSystem { +export namespace DescribeVolumesResponse { /** * @internal */ - export const filterSensitiveLog = (obj: FileSystem): any => ({ + export const filterSensitiveLog = (obj: DescribeVolumesResponse): any => ({ ...obj, }); } /** - *
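// A minimal sketch of the manual NextToken pattern used by the response shapes above,
// with the existing DescribeVolumesCommand; the page size and Region are placeholders.
import { FSxClient, DescribeVolumesCommand } from "@aws-sdk/client-fsx";
import type { Volume } from "@aws-sdk/client-fsx";

async function listAllVolumes(): Promise<Volume[]> {
  const client = new FSxClient({ region: "us-east-1" });
  const volumes: Volume[] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new DescribeVolumesCommand({ MaxResults: 50, NextToken: nextToken })
    );
    volumes.push(...(page.Volumes ?? []));
    nextToken = page.NextToken; // undefined once the last page has been returned
  } while (nextToken);
  return volumes;
}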

                                    A backup of an Amazon FSx for Windows File Server or Amazon FSx for Lustre file system, - * or of an Amazon FSx for NetApp ONTAP volume.

                                    + *

                                    A backup of an Amazon FSx for Windows File Server, Amazon FSx for + * Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon FSx + * for OpenZFS file system.

                                    */ export interface Backup { /** @@ -5774,7 +8232,7 @@ export interface Backup { *
                                  • *
                                  • *

                                    - * PENDING - For user-initiated backups on Lustre file systems only; Amazon FSx has not started creating the backup.

                                    + * PENDING - For user-initiated backups on Lustre file systems only; Amazon FSx hasn't started creating the backup.

                                    *
                                  • *
                                  • *

                                    @@ -5782,7 +8240,7 @@ export interface Backup { *

                                  • *
                                  • *

                                    - * TRANSFERRING - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to S3.

                                    + * TRANSFERRING - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to Amazon S3.

                                    *
                                  • *
                                  • *

                                    @@ -5790,23 +8248,24 @@ export interface Backup { *

                                  • *
                                  • *

                                    - * DELETED - Amazon FSx deleted the backup and it is no longer available.

                                    + * DELETED - Amazon FSx deleted the backup and it's no longer + * available.

                                    *
                                  • *
                                  • *

                                    - * FAILED - Amazon FSx could not complete the backup.

                                    + * FAILED - Amazon FSx couldn't finish the backup.

                                    *
                                  • *
                                  */ Lifecycle: BackupLifecycle | string | undefined; /** - *

                                  Details explaining any failures that occur when creating a backup.

                                  + *

                                  Details explaining any failures that occurred when creating a backup.

                                  */ FailureDetails?: BackupFailureDetails; /** - *

                                  The type of the file system backup.

                                  + *

                                  The type of the file-system backup.

                                  */ Type: BackupType | string | undefined; @@ -5822,8 +8281,7 @@ export interface Backup { /** *

                                  The ID of the Key Management Service (KMS) key used to encrypt the - * backup of the Amazon FSx file system's data at rest. - *

                                  + * backup of the Amazon FSx file system's data at rest.

                                  */ KmsKeyId?: string; @@ -5833,18 +8291,19 @@ export interface Backup { ResourceARN?: string; /** - *

                                  Tags associated with a particular file system.

                                  + *

                                  The tags associated with a particular file system.

                                  */ Tags?: Tag[]; /** - *

                                  Metadata of the file system associated with the backup. This metadata is persisted + *

                                  The metadata of the file system associated with the backup. This metadata is persisted * even if the file system is deleted.

                                  */ FileSystem: FileSystem | undefined; /** - *

                                  The configuration of the self-managed Microsoft Active Directory (AD) to which the Windows File Server instance is joined.

                                  + *

                                  The configuration of the self-managed Microsoft Active Directory directory to which + * the Windows File Server instance is joined.

                                  */ DirectoryInformation?: ActiveDirectoryBackupAttributes; @@ -5855,7 +8314,7 @@ export interface Backup { OwnerId?: string; /** - *

                                  The ID of the source backup. Specifies the backup you are copying.

                                  + *

                                  The ID of the source backup. Specifies the backup that you are copying.

                                  */ SourceBackupId?: string; @@ -5866,12 +8325,13 @@ export interface Backup { SourceBackupRegion?: string; /** - *

                                  Specifies the resource type that is backed up.

                                  + *

                                  Specifies the resource type that's backed up.

                                  */ ResourceType?: ResourceType | string; /** - *

                                  Describes an Amazon FSx for NetApp ONTAP volume.

                                  + *

                                  Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS + * volume.

                                  */ Volume?: Volume; } @@ -5885,68 +8345,11 @@ export namespace Backup { }); } -/** - *
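// A minimal sketch that reads the Backup shape above with the existing
// DescribeBackupsCommand; the backup ID is a placeholder and FailureDetails.Message
// is an assumed member of BackupFailureDetails.
import { FSxClient, DescribeBackupsCommand } from "@aws-sdk/client-fsx";

async function reportBackup(backupId: string): Promise<void> {
  const client = new FSxClient({ region: "us-east-1" });
  const { Backups } = await client.send(new DescribeBackupsCommand({ BackupIds: [backupId] }));
  const backup = Backups?.[0];
  if (!backup) throw new Error(`backup ${backupId} not found`);
  if (backup.Lifecycle === "FAILED") {
    console.error(backup.FailureDetails?.Message); // assumed member
  } else {
    console.log(`${backupId}: ${backup.Lifecycle} (${backup.Type})`);
  }
}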

                                  The response object for the CreateFileSystemFromBackup - * operation.

                                  - */ -export interface CreateFileSystemFromBackupResponse { - /** - *

                                  A description of the file system.

                                  - */ - FileSystem?: FileSystem; -} - -export namespace CreateFileSystemFromBackupResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: CreateFileSystemFromBackupResponse): any => ({ - ...obj, - }); -} - -/** - *

                                  The response object returned after the file system is created.

                                  - */ -export interface CreateFileSystemResponse { - /** - *

                                  The configuration of the file system that was created.

                                  - */ - FileSystem?: FileSystem; -} - -export namespace CreateFileSystemResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: CreateFileSystemResponse): any => ({ - ...obj, - }); -} - -/** - *

                                  The response object for the UpdateFileSystem operation.

                                  - */ -export interface UpdateFileSystemResponse { - /** - *

                                  A description of the file system that was updated.

                                  - */ - FileSystem?: FileSystem; -} - -export namespace UpdateFileSystemResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: UpdateFileSystemResponse): any => ({ - ...obj, - }); -} - export interface CopyBackupResponse { /** - *

                                  A backup of an Amazon FSx for Windows File Server or Amazon FSx for Lustre file system, - * or of an Amazon FSx for NetApp ONTAP volume.

                                  + *

                                  A backup of an Amazon FSx for Windows File Server, Amazon FSx for + * Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon FSx + * for OpenZFS file system.

                                  */ Backup?: Backup; } @@ -5980,33 +8383,7 @@ export namespace CreateBackupResponse { } /** - *

                                  The response object for DescribeFileSystems operation.

                                  - */ -export interface DescribeFileSystemsResponse { - /** - *

                                  An array of file system descriptions.

                                  - */ - FileSystems?: FileSystem[]; - - /** - *

                                  Present if there are more file systems than returned in the response (String). You - * can use the NextToken value in the later request to fetch the - * descriptions.

                                  - */ - NextToken?: string; -} - -export namespace DescribeFileSystemsResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: DescribeFileSystemsResponse): any => ({ - ...obj, - }); -} - -/** - *

                                  Response object for DescribeBackups operation.

                                  + *

                                  Response object for the DescribeBackups operation.

                                  */ export interface DescribeBackupsResponse { /** @@ -6015,9 +8392,9 @@ export interface DescribeBackupsResponse { Backups?: Backup[]; /** - *

                                  This is present if there are more backups than returned in the response (String). - * You can use the NextToken value in the later request to fetch the backups. - *

                                  + *

                                  A NextToken value is present if there are more backups than returned in + * the response. You can use the NextToken value in the subsequent request to + * fetch the backups.
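// A usage sketch for the token-based pagination described above, using the
// paginateDescribeSnapshots helper added below in the new DescribeSnapshotsPaginator;
// the Region and page size are placeholders.
import { FSxClient, paginateDescribeSnapshots } from "@aws-sdk/client-fsx";

async function countSnapshots(): Promise<number> {
  const client = new FSxClient({ region: "us-east-1" });
  let count = 0;
  // The paginator threads NextToken for you; pageSize is passed through as MaxResults.
  for await (const page of paginateDescribeSnapshots({ client, pageSize: 50 }, {})) {
    count += page.Snapshots?.length ?? 0;
  }
  return count;
}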

                                  */ NextToken?: string; } diff --git a/clients/client-fsx/src/pagination/DescribeDataRepositoryAssociationsPaginator.ts b/clients/client-fsx/src/pagination/DescribeDataRepositoryAssociationsPaginator.ts new file mode 100644 index 000000000000..1b270b6eff2b --- /dev/null +++ b/clients/client-fsx/src/pagination/DescribeDataRepositoryAssociationsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + DescribeDataRepositoryAssociationsCommand, + DescribeDataRepositoryAssociationsCommandInput, + DescribeDataRepositoryAssociationsCommandOutput, +} from "../commands/DescribeDataRepositoryAssociationsCommand"; +import { FSx } from "../FSx"; +import { FSxClient } from "../FSxClient"; +import { FSxPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: FSxClient, + input: DescribeDataRepositoryAssociationsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new DescribeDataRepositoryAssociationsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: FSx, + input: DescribeDataRepositoryAssociationsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.describeDataRepositoryAssociations(input, ...args); +}; +export async function* paginateDescribeDataRepositoryAssociations( + config: FSxPaginationConfiguration, + input: DescribeDataRepositoryAssociationsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: DescribeDataRepositoryAssociationsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof FSx) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof FSxClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected FSx | FSxClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-fsx/src/pagination/DescribeSnapshotsPaginator.ts b/clients/client-fsx/src/pagination/DescribeSnapshotsPaginator.ts new file mode 100644 index 000000000000..a17f1579b5cc --- /dev/null +++ b/clients/client-fsx/src/pagination/DescribeSnapshotsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + DescribeSnapshotsCommand, + DescribeSnapshotsCommandInput, + DescribeSnapshotsCommandOutput, +} from "../commands/DescribeSnapshotsCommand"; +import { FSx } from "../FSx"; +import { FSxClient } from "../FSxClient"; +import { FSxPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: FSxClient, + input: DescribeSnapshotsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new DescribeSnapshotsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: FSx, + input: DescribeSnapshotsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.describeSnapshots(input, ...args); +}; +export async function* paginateDescribeSnapshots( + config: FSxPaginationConfiguration, + input: DescribeSnapshotsCommandInput, 
+ ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: DescribeSnapshotsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof FSx) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof FSxClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected FSx | FSxClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-fsx/src/pagination/index.ts b/clients/client-fsx/src/pagination/index.ts index 4d1519b5d8f9..910d26ac911f 100644 --- a/clients/client-fsx/src/pagination/index.ts +++ b/clients/client-fsx/src/pagination/index.ts @@ -1,7 +1,9 @@ export * from "./DescribeBackupsPaginator"; +export * from "./DescribeDataRepositoryAssociationsPaginator"; export * from "./DescribeDataRepositoryTasksPaginator"; export * from "./DescribeFileSystemAliasesPaginator"; export * from "./DescribeFileSystemsPaginator"; +export * from "./DescribeSnapshotsPaginator"; export * from "./DescribeStorageVirtualMachinesPaginator"; export * from "./DescribeVolumesPaginator"; export * from "./Interfaces"; diff --git a/clients/client-fsx/src/protocols/Aws_json1_1.ts b/clients/client-fsx/src/protocols/Aws_json1_1.ts index 5292bae6fe06..f90b54478f54 100644 --- a/clients/client-fsx/src/protocols/Aws_json1_1.ts +++ b/clients/client-fsx/src/protocols/Aws_json1_1.ts @@ -28,6 +28,10 @@ import { } from "../commands/CancelDataRepositoryTaskCommand"; import { CopyBackupCommandInput, CopyBackupCommandOutput } from "../commands/CopyBackupCommand"; import { CreateBackupCommandInput, CreateBackupCommandOutput } from "../commands/CreateBackupCommand"; +import { + CreateDataRepositoryAssociationCommandInput, + CreateDataRepositoryAssociationCommandOutput, +} from "../commands/CreateDataRepositoryAssociationCommand"; import { CreateDataRepositoryTaskCommandInput, CreateDataRepositoryTaskCommandOutput, @@ -37,6 +41,7 @@ import { CreateFileSystemFromBackupCommandInput, CreateFileSystemFromBackupCommandOutput, } from "../commands/CreateFileSystemFromBackupCommand"; +import { CreateSnapshotCommandInput, CreateSnapshotCommandOutput } from "../commands/CreateSnapshotCommand"; import { CreateStorageVirtualMachineCommandInput, CreateStorageVirtualMachineCommandOutput, @@ -47,13 +52,22 @@ import { CreateVolumeFromBackupCommandOutput, } from "../commands/CreateVolumeFromBackupCommand"; import { DeleteBackupCommandInput, DeleteBackupCommandOutput } from "../commands/DeleteBackupCommand"; +import { + DeleteDataRepositoryAssociationCommandInput, + DeleteDataRepositoryAssociationCommandOutput, +} from "../commands/DeleteDataRepositoryAssociationCommand"; import { DeleteFileSystemCommandInput, DeleteFileSystemCommandOutput } from "../commands/DeleteFileSystemCommand"; +import { DeleteSnapshotCommandInput, DeleteSnapshotCommandOutput } from "../commands/DeleteSnapshotCommand"; import { DeleteStorageVirtualMachineCommandInput, DeleteStorageVirtualMachineCommandOutput, } from "../commands/DeleteStorageVirtualMachineCommand"; import { DeleteVolumeCommandInput, DeleteVolumeCommandOutput } from "../commands/DeleteVolumeCommand"; import { DescribeBackupsCommandInput, 
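For reference, the two paginators added above (and re-exported from the pagination index) are consumed as async generators. A minimal sketch, assuming a region of us-east-1 and a page size of 50 (both placeholders) and assuming the DescribeSnapshots response carries its results in the Snapshots member:

import { FSxClient, paginateDescribeSnapshots, Snapshot } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" }); // placeholder region

async function listAllSnapshots(): Promise<Snapshot[]> {
  const snapshots: Snapshot[] = [];
  // The paginator drives the NextToken/MaxResults loop shown in the generated code above.
  for await (const page of paginateDescribeSnapshots({ client, pageSize: 50 }, {})) {
    snapshots.push(...(page.Snapshots ?? []));
  }
  return snapshots;
}

The same shape works for paginateDescribeDataRepositoryAssociations, which pages over data repository associations instead of snapshots.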
DescribeBackupsCommandOutput } from "../commands/DescribeBackupsCommand"; +import { + DescribeDataRepositoryAssociationsCommandInput, + DescribeDataRepositoryAssociationsCommandOutput, +} from "../commands/DescribeDataRepositoryAssociationsCommand"; import { DescribeDataRepositoryTasksCommandInput, DescribeDataRepositoryTasksCommandOutput, @@ -66,6 +80,7 @@ import { DescribeFileSystemsCommandInput, DescribeFileSystemsCommandOutput, } from "../commands/DescribeFileSystemsCommand"; +import { DescribeSnapshotsCommandInput, DescribeSnapshotsCommandOutput } from "../commands/DescribeSnapshotsCommand"; import { DescribeStorageVirtualMachinesCommandInput, DescribeStorageVirtualMachinesCommandOutput, @@ -79,9 +94,22 @@ import { ListTagsForResourceCommandInput, ListTagsForResourceCommandOutput, } from "../commands/ListTagsForResourceCommand"; +import { + ReleaseFileSystemNfsV3LocksCommandInput, + ReleaseFileSystemNfsV3LocksCommandOutput, +} from "../commands/ReleaseFileSystemNfsV3LocksCommand"; +import { + RestoreVolumeFromSnapshotCommandInput, + RestoreVolumeFromSnapshotCommandOutput, +} from "../commands/RestoreVolumeFromSnapshotCommand"; import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { + UpdateDataRepositoryAssociationCommandInput, + UpdateDataRepositoryAssociationCommandOutput, +} from "../commands/UpdateDataRepositoryAssociationCommand"; import { UpdateFileSystemCommandInput, UpdateFileSystemCommandOutput } from "../commands/UpdateFileSystemCommand"; +import { UpdateSnapshotCommandInput, UpdateSnapshotCommandOutput } from "../commands/UpdateSnapshotCommand"; import { UpdateStorageVirtualMachineCommandInput, UpdateStorageVirtualMachineCommandOutput, @@ -95,6 +123,8 @@ import { Alias, AssociateFileSystemAliasesRequest, AssociateFileSystemAliasesResponse, + AutoExportPolicy, + AutoImportPolicy, Backup, BackupBeingCopied, BackupFailureDetails, @@ -109,16 +139,23 @@ import { CopyBackupResponse, CreateBackupRequest, CreateBackupResponse, + CreateDataRepositoryAssociationRequest, + CreateDataRepositoryAssociationResponse, CreateDataRepositoryTaskRequest, CreateDataRepositoryTaskResponse, CreateFileSystemFromBackupRequest, CreateFileSystemFromBackupResponse, CreateFileSystemLustreConfiguration, CreateFileSystemOntapConfiguration, + CreateFileSystemOpenZFSConfiguration, CreateFileSystemRequest, CreateFileSystemResponse, CreateFileSystemWindowsConfiguration, CreateOntapVolumeConfiguration, + CreateOpenZFSOriginSnapshotConfiguration, + CreateOpenZFSVolumeConfiguration, + CreateSnapshotRequest, + CreateSnapshotResponse, CreateStorageVirtualMachineRequest, CreateStorageVirtualMachineResponse, CreateSvmActiveDirectoryConfiguration, @@ -126,6 +163,8 @@ import { CreateVolumeFromBackupResponse, CreateVolumeRequest, CreateVolumeResponse, + DataRepositoryAssociation, + DataRepositoryAssociationNotFound, DataRepositoryConfiguration, DataRepositoryFailureDetails, DataRepositoryTask, @@ -137,26 +176,38 @@ import { DataRepositoryTaskStatus, DeleteBackupRequest, DeleteBackupResponse, + DeleteDataRepositoryAssociationRequest, + DeleteDataRepositoryAssociationResponse, DeleteFileSystemLustreConfiguration, DeleteFileSystemLustreResponse, + DeleteFileSystemOpenZFSConfiguration, + DeleteFileSystemOpenZFSResponse, DeleteFileSystemRequest, DeleteFileSystemResponse, DeleteFileSystemWindowsConfiguration, DeleteFileSystemWindowsResponse, + 
DeleteOpenZFSVolumeOption, + DeleteSnapshotRequest, + DeleteSnapshotResponse, DeleteStorageVirtualMachineRequest, DeleteStorageVirtualMachineResponse, DeleteVolumeOntapConfiguration, DeleteVolumeOntapResponse, + DeleteVolumeOpenZFSConfiguration, DeleteVolumeRequest, DeleteVolumeResponse, DescribeBackupsRequest, DescribeBackupsResponse, + DescribeDataRepositoryAssociationsRequest, + DescribeDataRepositoryAssociationsResponse, DescribeDataRepositoryTasksRequest, DescribeDataRepositoryTasksResponse, DescribeFileSystemAliasesRequest, DescribeFileSystemAliasesResponse, DescribeFileSystemsRequest, DescribeFileSystemsResponse, + DescribeSnapshotsRequest, + DescribeSnapshotsResponse, DescribeStorageVirtualMachinesRequest, DescribeStorageVirtualMachinesResponse, DescribeVolumesRequest, @@ -164,6 +215,7 @@ import { DisassociateFileSystemAliasesRequest, DisassociateFileSystemAliasesResponse, DiskIopsConfiguration, + EventType, FileSystem, FileSystemEndpoint, FileSystemEndpoints, @@ -174,6 +226,7 @@ import { IncompatibleParameterError, IncompatibleRegionForMultiAZ, InternalServerError, + InvalidDataRepositoryType, InvalidDestinationKmsKey, InvalidExportPath, InvalidImportPath, @@ -185,17 +238,35 @@ import { ListTagsForResourceRequest, ListTagsForResourceResponse, LustreFileSystemConfiguration, + LustreLogConfiguration, + LustreLogCreateConfiguration, MissingFileSystemConfiguration, MissingVolumeConfiguration, NotServiceResourceError, OntapFileSystemConfiguration, OntapVolumeConfiguration, + OpenZFSClientConfiguration, + OpenZFSCreateRootVolumeConfiguration, + OpenZFSFileSystemConfiguration, + OpenZFSNfsExport, + OpenZFSOriginSnapshotConfiguration, + OpenZFSUserOrGroupQuota, + OpenZFSVolumeConfiguration, + ReleaseFileSystemNfsV3LocksRequest, + ReleaseFileSystemNfsV3LocksResponse, ResourceDoesNotSupportTagging, ResourceNotFound, + RestoreOpenZFSVolumeOption, + RestoreVolumeFromSnapshotRequest, + RestoreVolumeFromSnapshotResponse, + S3DataRepositoryConfiguration, SelfManagedActiveDirectoryAttributes, SelfManagedActiveDirectoryConfiguration, SelfManagedActiveDirectoryConfigurationUpdates, ServiceLimitExceeded, + Snapshot, + SnapshotFilter, + SnapshotNotFound, SourceBackupUnavailable, StorageVirtualMachine, StorageVirtualMachineFilter, @@ -210,12 +281,18 @@ import { UnsupportedOperation, UntagResourceRequest, UntagResourceResponse, + UpdateDataRepositoryAssociationRequest, + UpdateDataRepositoryAssociationResponse, UpdateFileSystemLustreConfiguration, UpdateFileSystemOntapConfiguration, + UpdateFileSystemOpenZFSConfiguration, UpdateFileSystemRequest, UpdateFileSystemResponse, UpdateFileSystemWindowsConfiguration, UpdateOntapVolumeConfiguration, + UpdateOpenZFSVolumeConfiguration, + UpdateSnapshotRequest, + UpdateSnapshotResponse, UpdateStorageVirtualMachineRequest, UpdateStorageVirtualMachineResponse, UpdateSvmActiveDirectoryConfiguration, @@ -281,6 +358,19 @@ export const serializeAws_json1_1CreateBackupCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1CreateDataRepositoryAssociationCommand = async ( + input: CreateDataRepositoryAssociationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.CreateDataRepositoryAssociation", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1CreateDataRepositoryAssociationRequest(input, context)); + return buildHttpRpcRequest(context, 
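The newly imported model shapes above (CreateFileSystemOpenZFSConfiguration, OpenZFSCreateRootVolumeConfiguration, and the related OpenZFS types) carry the FSx for OpenZFS support introduced in this update. A minimal sketch of creating such a file system through the high-level client; the subnet ID, storage capacity, and throughput figures are placeholder assumptions, not values taken from this change:

import { FSxClient, CreateFileSystemCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" }); // placeholder region

async function createOpenZfsFileSystem() {
  // Single-AZ OpenZFS file system; the values below are illustrative only.
  const { FileSystem } = await client.send(
    new CreateFileSystemCommand({
      FileSystemType: "OPENZFS",
      StorageCapacity: 64, // GiB (placeholder)
      SubnetIds: ["subnet-0123456789abcdef0"], // placeholder subnet
      OpenZFSConfiguration: {
        DeploymentType: "SINGLE_AZ_1",
        ThroughputCapacity: 64, // MB/s (placeholder)
      },
    })
  );
  return FileSystem;
}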
headers, "/", undefined, body); +}; + export const serializeAws_json1_1CreateDataRepositoryTaskCommand = async ( input: CreateDataRepositoryTaskCommandInput, context: __SerdeContext @@ -320,6 +410,19 @@ export const serializeAws_json1_1CreateFileSystemFromBackupCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1CreateSnapshotCommand = async ( + input: CreateSnapshotCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.CreateSnapshot", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1CreateSnapshotRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1CreateStorageVirtualMachineCommand = async ( input: CreateStorageVirtualMachineCommandInput, context: __SerdeContext @@ -372,6 +475,19 @@ export const serializeAws_json1_1DeleteBackupCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DeleteDataRepositoryAssociationCommand = async ( + input: DeleteDataRepositoryAssociationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.DeleteDataRepositoryAssociation", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeleteDataRepositoryAssociationRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DeleteFileSystemCommand = async ( input: DeleteFileSystemCommandInput, context: __SerdeContext @@ -385,6 +501,19 @@ export const serializeAws_json1_1DeleteFileSystemCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DeleteSnapshotCommand = async ( + input: DeleteSnapshotCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.DeleteSnapshot", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DeleteSnapshotRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DeleteStorageVirtualMachineCommand = async ( input: DeleteStorageVirtualMachineCommandInput, context: __SerdeContext @@ -424,6 +553,19 @@ export const serializeAws_json1_1DescribeBackupsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DescribeDataRepositoryAssociationsCommand = async ( + input: DescribeDataRepositoryAssociationsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.DescribeDataRepositoryAssociations", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DescribeDataRepositoryAssociationsRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeDataRepositoryTasksCommand = async ( input: DescribeDataRepositoryTasksCommandInput, context: __SerdeContext @@ -463,6 +605,19 @@ export const 
serializeAws_json1_1DescribeFileSystemsCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1DescribeSnapshotsCommand = async ( + input: DescribeSnapshotsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.DescribeSnapshots", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1DescribeSnapshotsRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1DescribeStorageVirtualMachinesCommand = async ( input: DescribeStorageVirtualMachinesCommandInput, context: __SerdeContext @@ -515,6 +670,32 @@ export const serializeAws_json1_1ListTagsForResourceCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1ReleaseFileSystemNfsV3LocksCommand = async ( + input: ReleaseFileSystemNfsV3LocksCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.ReleaseFileSystemNfsV3Locks", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1ReleaseFileSystemNfsV3LocksRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + +export const serializeAws_json1_1RestoreVolumeFromSnapshotCommand = async ( + input: RestoreVolumeFromSnapshotCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.RestoreVolumeFromSnapshot", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1RestoreVolumeFromSnapshotRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1TagResourceCommand = async ( input: TagResourceCommandInput, context: __SerdeContext @@ -541,6 +722,19 @@ export const serializeAws_json1_1UntagResourceCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1UpdateDataRepositoryAssociationCommand = async ( + input: UpdateDataRepositoryAssociationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.UpdateDataRepositoryAssociation", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1UpdateDataRepositoryAssociationRequest(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1UpdateFileSystemCommand = async ( input: UpdateFileSystemCommandInput, context: __SerdeContext @@ -554,6 +748,19 @@ export const serializeAws_json1_1UpdateFileSystemCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1UpdateSnapshotCommand = async ( + input: UpdateSnapshotCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "AWSSimbaAPIService_v20180301.UpdateSnapshot", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1UpdateSnapshotRequest(input, context)); + 
return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const serializeAws_json1_1UpdateStorageVirtualMachineCommand = async ( input: UpdateStorageVirtualMachineCommandInput, context: __SerdeContext @@ -980,6 +1187,100 @@ const deserializeAws_json1_1CreateBackupCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1CreateDataRepositoryAssociationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1CreateDataRepositoryAssociationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1CreateDataRepositoryAssociationResponse(data, context); + const response: CreateDataRepositoryAssociationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1CreateDataRepositoryAssociationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "FileSystemNotFound": + case "com.amazonaws.fsx#FileSystemNotFound": + response = { + ...(await deserializeAws_json1_1FileSystemNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "IncompatibleParameterError": + case "com.amazonaws.fsx#IncompatibleParameterError": + response = { + ...(await deserializeAws_json1_1IncompatibleParameterErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceLimitExceeded": + case "com.amazonaws.fsx#ServiceLimitExceeded": + response = { + ...(await deserializeAws_json1_1ServiceLimitExceededResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperation": + case "com.amazonaws.fsx#UnsupportedOperation": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const 
deserializeAws_json1_1CreateDataRepositoryTaskCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1326,6 +1627,84 @@ const deserializeAws_json1_1CreateFileSystemFromBackupCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1CreateSnapshotCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1CreateSnapshotCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1CreateSnapshotResponse(data, context); + const response: CreateSnapshotCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1CreateSnapshotCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceLimitExceeded": + case "com.amazonaws.fsx#ServiceLimitExceeded": + response = { + ...(await deserializeAws_json1_1ServiceLimitExceededResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "VolumeNotFound": + case "com.amazonaws.fsx#VolumeNotFound": + response = { + ...(await deserializeAws_json1_1VolumeNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1CreateStorageVirtualMachineCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1750,10 +2129,96 @@ const deserializeAws_json1_1DeleteBackupCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DeleteFileSystemCommand = async ( +export const deserializeAws_json1_1DeleteDataRepositoryAssociationCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeleteDataRepositoryAssociationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = 
deserializeAws_json1_1DeleteDataRepositoryAssociationResponse(data, context); + const response: DeleteDataRepositoryAssociationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1DeleteDataRepositoryAssociationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "DataRepositoryAssociationNotFound": + case "com.amazonaws.fsx#DataRepositoryAssociationNotFound": + response = { + ...(await deserializeAws_json1_1DataRepositoryAssociationNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "IncompatibleParameterError": + case "com.amazonaws.fsx#IncompatibleParameterError": + response = { + ...(await deserializeAws_json1_1IncompatibleParameterErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceLimitExceeded": + case "com.amazonaws.fsx#ServiceLimitExceeded": + response = { + ...(await deserializeAws_json1_1ServiceLimitExceededResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1DeleteFileSystemCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { if (output.statusCode >= 300) { return deserializeAws_json1_1DeleteFileSystemCommandError(output, context); } @@ -1836,6 +2301,76 @@ const deserializeAws_json1_1DeleteFileSystemCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_json1_1DeleteSnapshotCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1DeleteSnapshotCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1DeleteSnapshotResponse(data, context); + const response: DeleteSnapshotCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const 
deserializeAws_json1_1DeleteSnapshotCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "SnapshotNotFound": + case "com.amazonaws.fsx#SnapshotNotFound": + response = { + ...(await deserializeAws_json1_1SnapshotNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_json1_1DeleteStorageVirtualMachineCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2078,27 +2613,27 @@ const deserializeAws_json1_1DescribeBackupsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DescribeDataRepositoryTasksCommand = async ( +export const deserializeAws_json1_1DescribeDataRepositoryAssociationsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DescribeDataRepositoryTasksCommandError(output, context); + return deserializeAws_json1_1DescribeDataRepositoryAssociationsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DescribeDataRepositoryTasksResponse(data, context); - const response: DescribeDataRepositoryTasksCommandOutput = { + contents = deserializeAws_json1_1DescribeDataRepositoryAssociationsResponse(data, context); + const response: DescribeDataRepositoryAssociationsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DescribeDataRepositoryTasksCommandError = async ( +const deserializeAws_json1_1DescribeDataRepositoryAssociationsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2115,10 +2650,10 @@ const deserializeAws_json1_1DescribeDataRepositoryTasksCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "DataRepositoryTaskNotFound": - case "com.amazonaws.fsx#DataRepositoryTaskNotFound": + case "DataRepositoryAssociationNotFound": + case 
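Each of the error deserializers above resolves the modeled FSx exceptions (BadRequest, SnapshotNotFound, InternalServerError, and so on) into a rejected Error whose name is set to the error code, so callers can branch on err.name. A minimal sketch of deleting a snapshot while tolerating one that is already gone (the snapshot ID passed by the caller is a placeholder):

import { FSxClient, DeleteSnapshotCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" }); // placeholder region

async function deleteSnapshotIfPresent(snapshotId: string): Promise<void> {
  try {
    await client.send(new DeleteSnapshotCommand({ SnapshotId: snapshotId }));
  } catch (err: any) {
    // The deserializer above sets name to the modeled error code.
    if (err?.name === "SnapshotNotFound") {
      return; // nothing to delete
    }
    throw err;
  }
}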
"com.amazonaws.fsx#DataRepositoryAssociationNotFound": response = { - ...(await deserializeAws_json1_1DataRepositoryTaskNotFoundResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1DataRepositoryAssociationNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2139,6 +2674,14 @@ const deserializeAws_json1_1DescribeDataRepositoryTasksCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "InvalidDataRepositoryType": + case "com.amazonaws.fsx#InvalidDataRepositoryType": + response = { + ...(await deserializeAws_json1_1InvalidDataRepositoryTypeResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -2156,27 +2699,27 @@ const deserializeAws_json1_1DescribeDataRepositoryTasksCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DescribeFileSystemAliasesCommand = async ( +export const deserializeAws_json1_1DescribeDataRepositoryTasksCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DescribeFileSystemAliasesCommandError(output, context); + return deserializeAws_json1_1DescribeDataRepositoryTasksCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DescribeFileSystemAliasesResponse(data, context); - const response: DescribeFileSystemAliasesCommandOutput = { + contents = deserializeAws_json1_1DescribeDataRepositoryTasksResponse(data, context); + const response: DescribeDataRepositoryTasksCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DescribeFileSystemAliasesCommandError = async ( +const deserializeAws_json1_1DescribeDataRepositoryTasksCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2193,6 +2736,14 @@ const deserializeAws_json1_1DescribeFileSystemAliasesCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "DataRepositoryTaskNotFound": + case "com.amazonaws.fsx#DataRepositoryTaskNotFound": + response = { + ...(await deserializeAws_json1_1DataRepositoryTaskNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "FileSystemNotFound": case "com.amazonaws.fsx#FileSystemNotFound": response = { @@ -2226,27 +2777,27 @@ const deserializeAws_json1_1DescribeFileSystemAliasesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DescribeFileSystemsCommand = async ( +export const deserializeAws_json1_1DescribeFileSystemAliasesCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DescribeFileSystemsCommandError(output, context); + return deserializeAws_json1_1DescribeFileSystemAliasesCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DescribeFileSystemsResponse(data, context); - 
const response: DescribeFileSystemsCommandOutput = { + contents = deserializeAws_json1_1DescribeFileSystemAliasesResponse(data, context); + const response: DescribeFileSystemAliasesCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DescribeFileSystemsCommandError = async ( +const deserializeAws_json1_1DescribeFileSystemAliasesCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2296,27 +2847,27 @@ const deserializeAws_json1_1DescribeFileSystemsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DescribeStorageVirtualMachinesCommand = async ( +export const deserializeAws_json1_1DescribeFileSystemsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DescribeStorageVirtualMachinesCommandError(output, context); + return deserializeAws_json1_1DescribeFileSystemsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DescribeStorageVirtualMachinesResponse(data, context); - const response: DescribeStorageVirtualMachinesCommandOutput = { + contents = deserializeAws_json1_1DescribeFileSystemsResponse(data, context); + const response: DescribeFileSystemsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DescribeStorageVirtualMachinesCommandError = async ( +const deserializeAws_json1_1DescribeFileSystemsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2333,18 +2884,18 @@ const deserializeAws_json1_1DescribeStorageVirtualMachinesCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "InternalServerError": - case "com.amazonaws.fsx#InternalServerError": + case "FileSystemNotFound": + case "com.amazonaws.fsx#FileSystemNotFound": response = { - ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1FileSystemNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "StorageVirtualMachineNotFound": - case "com.amazonaws.fsx#StorageVirtualMachineNotFound": + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": response = { - ...(await deserializeAws_json1_1StorageVirtualMachineNotFoundResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2366,27 +2917,27 @@ const deserializeAws_json1_1DescribeStorageVirtualMachinesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DescribeVolumesCommand = async ( +export const deserializeAws_json1_1DescribeSnapshotsCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DescribeVolumesCommandError(output, context); + return 
deserializeAws_json1_1DescribeSnapshotsCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DescribeVolumesResponse(data, context); - const response: DescribeVolumesCommandOutput = { + contents = deserializeAws_json1_1DescribeSnapshotsResponse(data, context); + const response: DescribeSnapshotsCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DescribeVolumesCommandError = async ( +const deserializeAws_json1_1DescribeSnapshotsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2411,10 +2962,10 @@ const deserializeAws_json1_1DescribeVolumesCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "VolumeNotFound": - case "com.amazonaws.fsx#VolumeNotFound": + case "SnapshotNotFound": + case "com.amazonaws.fsx#SnapshotNotFound": response = { - ...(await deserializeAws_json1_1VolumeNotFoundResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1SnapshotNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2436,27 +2987,27 @@ const deserializeAws_json1_1DescribeVolumesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1DisassociateFileSystemAliasesCommand = async ( +export const deserializeAws_json1_1DescribeStorageVirtualMachinesCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1DisassociateFileSystemAliasesCommandError(output, context); + return deserializeAws_json1_1DescribeStorageVirtualMachinesCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1DisassociateFileSystemAliasesResponse(data, context); - const response: DisassociateFileSystemAliasesCommandOutput = { + contents = deserializeAws_json1_1DescribeStorageVirtualMachinesResponse(data, context); + const response: DescribeStorageVirtualMachinesCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1DisassociateFileSystemAliasesCommandError = async ( +const deserializeAws_json1_1DescribeStorageVirtualMachinesCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2473,18 +3024,18 @@ const deserializeAws_json1_1DisassociateFileSystemAliasesCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "FileSystemNotFound": - case "com.amazonaws.fsx#FileSystemNotFound": + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": response = { - ...(await deserializeAws_json1_1FileSystemNotFoundResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InternalServerError": - case "com.amazonaws.fsx#InternalServerError": + case "StorageVirtualMachineNotFound": + case "com.amazonaws.fsx#StorageVirtualMachineNotFound": response = { - ...(await 
deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1StorageVirtualMachineNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2506,27 +3057,27 @@ const deserializeAws_json1_1DisassociateFileSystemAliasesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1ListTagsForResourceCommand = async ( +export const deserializeAws_json1_1DescribeVolumesCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1ListTagsForResourceCommandError(output, context); + return deserializeAws_json1_1DescribeVolumesCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1ListTagsForResourceResponse(data, context); - const response: ListTagsForResourceCommandOutput = { + contents = deserializeAws_json1_1DescribeVolumesResponse(data, context); + const response: DescribeVolumesCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1ListTagsForResourceCommandError = async ( +const deserializeAws_json1_1DescribeVolumesCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2551,26 +3102,10 @@ const deserializeAws_json1_1ListTagsForResourceCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "NotServiceResourceError": - case "com.amazonaws.fsx#NotServiceResourceError": - response = { - ...(await deserializeAws_json1_1NotServiceResourceErrorResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ResourceDoesNotSupportTagging": - case "com.amazonaws.fsx#ResourceDoesNotSupportTagging": - response = { - ...(await deserializeAws_json1_1ResourceDoesNotSupportTaggingResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ResourceNotFound": - case "com.amazonaws.fsx#ResourceNotFound": + case "VolumeNotFound": + case "com.amazonaws.fsx#VolumeNotFound": response = { - ...(await deserializeAws_json1_1ResourceNotFoundResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1VolumeNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2592,27 +3127,27 @@ const deserializeAws_json1_1ListTagsForResourceCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1TagResourceCommand = async ( +export const deserializeAws_json1_1DisassociateFileSystemAliasesCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1TagResourceCommandError(output, context); + return deserializeAws_json1_1DisassociateFileSystemAliasesCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1TagResourceResponse(data, context); - const response: TagResourceCommandOutput = { + contents = deserializeAws_json1_1DisassociateFileSystemAliasesResponse(data, context); + const response: 
DisassociateFileSystemAliasesCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1TagResourceCommandError = async ( +const deserializeAws_json1_1DisassociateFileSystemAliasesCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2629,34 +3164,18 @@ const deserializeAws_json1_1TagResourceCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "InternalServerError": - case "com.amazonaws.fsx#InternalServerError": - response = { - ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "NotServiceResourceError": - case "com.amazonaws.fsx#NotServiceResourceError": - response = { - ...(await deserializeAws_json1_1NotServiceResourceErrorResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ResourceDoesNotSupportTagging": - case "com.amazonaws.fsx#ResourceDoesNotSupportTagging": + case "FileSystemNotFound": + case "com.amazonaws.fsx#FileSystemNotFound": response = { - ...(await deserializeAws_json1_1ResourceDoesNotSupportTaggingResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1FileSystemNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "ResourceNotFound": - case "com.amazonaws.fsx#ResourceNotFound": + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": response = { - ...(await deserializeAws_json1_1ResourceNotFoundResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2678,27 +3197,27 @@ const deserializeAws_json1_1TagResourceCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1UntagResourceCommand = async ( +export const deserializeAws_json1_1ListTagsForResourceCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1UntagResourceCommandError(output, context); + return deserializeAws_json1_1ListTagsForResourceCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1UntagResourceResponse(data, context); - const response: UntagResourceCommandOutput = { + contents = deserializeAws_json1_1ListTagsForResourceResponse(data, context); + const response: ListTagsForResourceCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1UntagResourceCommandError = async ( +const deserializeAws_json1_1ListTagsForResourceCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2764,27 +3283,27 @@ const deserializeAws_json1_1UntagResourceCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1UpdateFileSystemCommand = async ( +export const deserializeAws_json1_1ReleaseFileSystemNfsV3LocksCommand = async ( 
output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1UpdateFileSystemCommandError(output, context); + return deserializeAws_json1_1ReleaseFileSystemNfsV3LocksCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1UpdateFileSystemResponse(data, context); - const response: UpdateFileSystemCommandOutput = { + contents = deserializeAws_json1_1ReleaseFileSystemNfsV3LocksResponse(data, context); + const response: ReleaseFileSystemNfsV3LocksCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1UpdateFileSystemCommandError = async ( +const deserializeAws_json1_1ReleaseFileSystemNfsV3LocksCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2825,26 +3344,80 @@ const deserializeAws_json1_1UpdateFileSystemCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "MissingFileSystemConfiguration": - case "com.amazonaws.fsx#MissingFileSystemConfiguration": + case "ServiceLimitExceeded": + case "com.amazonaws.fsx#ServiceLimitExceeded": response = { - ...(await deserializeAws_json1_1MissingFileSystemConfigurationResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ServiceLimitExceededResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "ServiceLimitExceeded": - case "com.amazonaws.fsx#ServiceLimitExceeded": + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; response = { - ...(await deserializeAws_json1_1ServiceLimitExceededResponse(parsedOutput, context)), + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1RestoreVolumeFromSnapshotCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1RestoreVolumeFromSnapshotCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1RestoreVolumeFromSnapshotResponse(data, context); + const response: RestoreVolumeFromSnapshotCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1RestoreVolumeFromSnapshotCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), name: errorCode, $metadata: 
deserializeMetadata(output), }; break; - case "UnsupportedOperation": - case "com.amazonaws.fsx#UnsupportedOperation": + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": response = { - ...(await deserializeAws_json1_1UnsupportedOperationResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "VolumeNotFound": + case "com.amazonaws.fsx#VolumeNotFound": + response = { + ...(await deserializeAws_json1_1VolumeNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2866,27 +3439,27 @@ const deserializeAws_json1_1UpdateFileSystemCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1UpdateStorageVirtualMachineCommand = async ( +export const deserializeAws_json1_1TagResourceCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1UpdateStorageVirtualMachineCommandError(output, context); + return deserializeAws_json1_1TagResourceCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1UpdateStorageVirtualMachineResponse(data, context); - const response: UpdateStorageVirtualMachineCommandOutput = { + contents = deserializeAws_json1_1TagResourceResponse(data, context); + const response: TagResourceCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1UpdateStorageVirtualMachineCommandError = async ( +const deserializeAws_json1_1TagResourceCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2903,34 +3476,34 @@ const deserializeAws_json1_1UpdateStorageVirtualMachineCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "IncompatibleParameterError": - case "com.amazonaws.fsx#IncompatibleParameterError": + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": response = { - ...(await deserializeAws_json1_1IncompatibleParameterErrorResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InternalServerError": - case "com.amazonaws.fsx#InternalServerError": + case "NotServiceResourceError": + case "com.amazonaws.fsx#NotServiceResourceError": response = { - ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1NotServiceResourceErrorResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "StorageVirtualMachineNotFound": - case "com.amazonaws.fsx#StorageVirtualMachineNotFound": + case "ResourceDoesNotSupportTagging": + case "com.amazonaws.fsx#ResourceDoesNotSupportTagging": response = { - ...(await deserializeAws_json1_1StorageVirtualMachineNotFoundResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ResourceDoesNotSupportTaggingResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "UnsupportedOperation": - case 
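RestoreVolumeFromSnapshot, whose deserializer and error handler appear above (BadRequest, InternalServerError, VolumeNotFound), rolls a volume back to the state captured by a snapshot. A minimal sketch; the IDs are placeholders, and the Options entry is an assumption based on the RestoreOpenZFSVolumeOption shape imported earlier rather than a value taken from this change:

import { FSxClient, RestoreVolumeFromSnapshotCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" }); // placeholder region

async function rollBackVolume() {
  const { VolumeId, Lifecycle } = await client.send(
    new RestoreVolumeFromSnapshotCommand({
      VolumeId: "fsvol-0123456789abcdef0", // placeholder volume ID
      SnapshotId: "fsvolsnap-0123456789abcdef0", // placeholder snapshot ID
      // Assumed option value; lets the service remove snapshots taken after this one.
      Options: ["DELETE_INTERMEDIATE_SNAPSHOTS"],
    })
  );
  return { VolumeId, Lifecycle };
}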
"com.amazonaws.fsx#UnsupportedOperation": + case "ResourceNotFound": + case "com.amazonaws.fsx#ResourceNotFound": response = { - ...(await deserializeAws_json1_1UnsupportedOperationResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ResourceNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -2952,27 +3525,27 @@ const deserializeAws_json1_1UpdateStorageVirtualMachineCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_json1_1UpdateVolumeCommand = async ( +export const deserializeAws_json1_1UntagResourceCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode >= 300) { - return deserializeAws_json1_1UpdateVolumeCommandError(output, context); + return deserializeAws_json1_1UntagResourceCommandError(output, context); } const data: any = await parseBody(output.body, context); let contents: any = {}; - contents = deserializeAws_json1_1UpdateVolumeResponse(data, context); - const response: UpdateVolumeCommandOutput = { + contents = deserializeAws_json1_1UntagResourceResponse(data, context); + const response: UntagResourceCommandOutput = { $metadata: deserializeMetadata(output), ...contents, }; return Promise.resolve(response); }; -const deserializeAws_json1_1UpdateVolumeCommandError = async ( +const deserializeAws_json1_1UntagResourceCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2989,34 +3562,34 @@ const deserializeAws_json1_1UpdateVolumeCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "IncompatibleParameterError": - case "com.amazonaws.fsx#IncompatibleParameterError": + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": response = { - ...(await deserializeAws_json1_1IncompatibleParameterErrorResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "InternalServerError": - case "com.amazonaws.fsx#InternalServerError": + case "NotServiceResourceError": + case "com.amazonaws.fsx#NotServiceResourceError": response = { - ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1NotServiceResourceErrorResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "MissingVolumeConfiguration": - case "com.amazonaws.fsx#MissingVolumeConfiguration": + case "ResourceDoesNotSupportTagging": + case "com.amazonaws.fsx#ResourceDoesNotSupportTagging": response = { - ...(await deserializeAws_json1_1MissingVolumeConfigurationResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ResourceDoesNotSupportTaggingResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; break; - case "VolumeNotFound": - case "com.amazonaws.fsx#VolumeNotFound": + case "ResourceNotFound": + case "com.amazonaws.fsx#ResourceNotFound": response = { - ...(await deserializeAws_json1_1VolumeNotFoundResponse(parsedOutput, context)), + ...(await deserializeAws_json1_1ResourceNotFoundResponse(parsedOutput, context)), name: errorCode, $metadata: deserializeMetadata(output), }; @@ -3038,23 +3611,453 @@ const deserializeAws_json1_1UpdateVolumeCommandError = 
async ( return Promise.reject(Object.assign(new Error(message), response)); }; -const deserializeAws_json1_1ActiveDirectoryErrorResponse = async ( - parsedOutput: any, +export const deserializeAws_json1_1UpdateDataRepositoryAssociationCommand = async ( + output: __HttpResponse, context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1ActiveDirectoryError(body, context); - const contents: ActiveDirectoryError = { - name: "ActiveDirectoryError", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1UpdateDataRepositoryAssociationCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1UpdateDataRepositoryAssociationResponse(data, context); + const response: UpdateDataRepositoryAssociationCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, }; - return contents; + return Promise.resolve(response); }; -const deserializeAws_json1_1BackupBeingCopiedResponse = async ( - parsedOutput: any, +const deserializeAws_json1_1UpdateDataRepositoryAssociationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "DataRepositoryAssociationNotFound": + case "com.amazonaws.fsx#DataRepositoryAssociationNotFound": + response = { + ...(await deserializeAws_json1_1DataRepositoryAssociationNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "IncompatibleParameterError": + case "com.amazonaws.fsx#IncompatibleParameterError": + response = { + ...(await deserializeAws_json1_1IncompatibleParameterErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceLimitExceeded": + case "com.amazonaws.fsx#ServiceLimitExceeded": + response = { + ...(await deserializeAws_json1_1ServiceLimitExceededResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1UpdateFileSystemCommand = async ( + output: 
__HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1UpdateFileSystemCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1UpdateFileSystemResponse(data, context); + const response: UpdateFileSystemCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1UpdateFileSystemCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "FileSystemNotFound": + case "com.amazonaws.fsx#FileSystemNotFound": + response = { + ...(await deserializeAws_json1_1FileSystemNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "IncompatibleParameterError": + case "com.amazonaws.fsx#IncompatibleParameterError": + response = { + ...(await deserializeAws_json1_1IncompatibleParameterErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "MissingFileSystemConfiguration": + case "com.amazonaws.fsx#MissingFileSystemConfiguration": + response = { + ...(await deserializeAws_json1_1MissingFileSystemConfigurationResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceLimitExceeded": + case "com.amazonaws.fsx#ServiceLimitExceeded": + response = { + ...(await deserializeAws_json1_1ServiceLimitExceededResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperation": + case "com.amazonaws.fsx#UnsupportedOperation": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1UpdateSnapshotCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1UpdateSnapshotCommandError(output, context); + } + const data: any = await parseBody(output.body, 
context); + let contents: any = {}; + contents = deserializeAws_json1_1UpdateSnapshotResponse(data, context); + const response: UpdateSnapshotCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1UpdateSnapshotCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "SnapshotNotFound": + case "com.amazonaws.fsx#SnapshotNotFound": + response = { + ...(await deserializeAws_json1_1SnapshotNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1UpdateStorageVirtualMachineCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1UpdateStorageVirtualMachineCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1UpdateStorageVirtualMachineResponse(data, context); + const response: UpdateStorageVirtualMachineCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1UpdateStorageVirtualMachineCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "IncompatibleParameterError": + case "com.amazonaws.fsx#IncompatibleParameterError": + response = { + ...(await deserializeAws_json1_1IncompatibleParameterErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case 
"com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "StorageVirtualMachineNotFound": + case "com.amazonaws.fsx#StorageVirtualMachineNotFound": + response = { + ...(await deserializeAws_json1_1StorageVirtualMachineNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnsupportedOperation": + case "com.amazonaws.fsx#UnsupportedOperation": + response = { + ...(await deserializeAws_json1_1UnsupportedOperationResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1UpdateVolumeCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1UpdateVolumeCommandError(output, context); + } + const data: any = await parseBody(output.body, context); + let contents: any = {}; + contents = deserializeAws_json1_1UpdateVolumeResponse(data, context); + const response: UpdateVolumeCommandOutput = { + $metadata: deserializeMetadata(output), + ...contents, + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1UpdateVolumeCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequest": + case "com.amazonaws.fsx#BadRequest": + response = { + ...(await deserializeAws_json1_1BadRequestResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "IncompatibleParameterError": + case "com.amazonaws.fsx#IncompatibleParameterError": + response = { + ...(await deserializeAws_json1_1IncompatibleParameterErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerError": + case "com.amazonaws.fsx#InternalServerError": + response = { + ...(await deserializeAws_json1_1InternalServerErrorResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "MissingVolumeConfiguration": + case "com.amazonaws.fsx#MissingVolumeConfiguration": + response = { + ...(await deserializeAws_json1_1MissingVolumeConfigurationResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "VolumeNotFound": + case "com.amazonaws.fsx#VolumeNotFound": + response = { + ...(await deserializeAws_json1_1VolumeNotFoundResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const 
parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_json1_1ActiveDirectoryErrorResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1ActiveDirectoryError(body, context); + const contents: ActiveDirectoryError = { + name: "ActiveDirectoryError", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + +const deserializeAws_json1_1BackupBeingCopiedResponse = async ( + parsedOutput: any, context: __SerdeContext ): Promise => { const body = parsedOutput.body; @@ -3128,6 +4131,21 @@ const deserializeAws_json1_1BadRequestResponse = async ( return contents; }; +const deserializeAws_json1_1DataRepositoryAssociationNotFoundResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1DataRepositoryAssociationNotFound(body, context); + const contents: DataRepositoryAssociationNotFound = { + name: "DataRepositoryAssociationNotFound", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1DataRepositoryTaskEndedResponse = async ( parsedOutput: any, context: __SerdeContext @@ -3233,6 +4251,21 @@ const deserializeAws_json1_1InternalServerErrorResponse = async ( return contents; }; +const deserializeAws_json1_1InvalidDataRepositoryTypeResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InvalidDataRepositoryType(body, context); + const contents: InvalidDataRepositoryType = { + name: "InvalidDataRepositoryType", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1InvalidDestinationKmsKeyResponse = async ( parsedOutput: any, context: __SerdeContext @@ -3428,6 +4461,21 @@ const deserializeAws_json1_1ServiceLimitExceededResponse = async ( return contents; }; +const deserializeAws_json1_1SnapshotNotFoundResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1SnapshotNotFound(body, context); + const contents: SnapshotNotFound = { + name: "SnapshotNotFound", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1SourceBackupUnavailableResponse = async ( parsedOutput: any, context: __SerdeContext @@ -3511,6 +4559,20 @@ const serializeAws_json1_1AssociateFileSystemAliasesRequest = ( }; }; +const serializeAws_json1_1AutoExportPolicy = (input: AutoExportPolicy, context: __SerdeContext): any => { + return { + ...(input.Events !== undefined && + input.Events !== null && { Events: serializeAws_json1_1EventTypes(input.Events, context) }), + }; +}; + +const serializeAws_json1_1AutoImportPolicy = 
(input: AutoImportPolicy, context: __SerdeContext): any => { + return { + ...(input.Events !== undefined && + input.Events !== null && { Events: serializeAws_json1_1EventTypes(input.Events, context) }), + }; +}; + const serializeAws_json1_1BackupIds = (input: string[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -3561,6 +4623,27 @@ const serializeAws_json1_1CreateBackupRequest = (input: CreateBackupRequest, con }; }; +const serializeAws_json1_1CreateDataRepositoryAssociationRequest = ( + input: CreateDataRepositoryAssociationRequest, + context: __SerdeContext +): any => { + return { + ...(input.BatchImportMetaDataOnCreate !== undefined && + input.BatchImportMetaDataOnCreate !== null && { BatchImportMetaDataOnCreate: input.BatchImportMetaDataOnCreate }), + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.DataRepositoryPath !== undefined && + input.DataRepositoryPath !== null && { DataRepositoryPath: input.DataRepositoryPath }), + ...(input.FileSystemId !== undefined && input.FileSystemId !== null && { FileSystemId: input.FileSystemId }), + ...(input.FileSystemPath !== undefined && + input.FileSystemPath !== null && { FileSystemPath: input.FileSystemPath }), + ...(input.ImportedFileChunkSize !== undefined && + input.ImportedFileChunkSize !== null && { ImportedFileChunkSize: input.ImportedFileChunkSize }), + ...(input.S3 !== undefined && + input.S3 !== null && { S3: serializeAws_json1_1S3DataRepositoryConfiguration(input.S3, context) }), + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_json1_1Tags(input.Tags, context) }), + }; +}; + const serializeAws_json1_1CreateDataRepositoryTaskRequest = ( input: CreateDataRepositoryTaskRequest, context: __SerdeContext @@ -3594,6 +4677,13 @@ const serializeAws_json1_1CreateFileSystemFromBackupRequest = ( context ), }), + ...(input.OpenZFSConfiguration !== undefined && + input.OpenZFSConfiguration !== null && { + OpenZFSConfiguration: serializeAws_json1_1CreateFileSystemOpenZFSConfiguration( + input.OpenZFSConfiguration, + context + ), + }), ...(input.SecurityGroupIds !== undefined && input.SecurityGroupIds !== null && { SecurityGroupIds: serializeAws_json1_1SecurityGroupIds(input.SecurityGroupIds, context), @@ -3639,6 +4729,10 @@ const serializeAws_json1_1CreateFileSystemLustreConfiguration = ( ...(input.ImportPath !== undefined && input.ImportPath !== null && { ImportPath: input.ImportPath }), ...(input.ImportedFileChunkSize !== undefined && input.ImportedFileChunkSize !== null && { ImportedFileChunkSize: input.ImportedFileChunkSize }), + ...(input.LogConfiguration !== undefined && + input.LogConfiguration !== null && { + LogConfiguration: serializeAws_json1_1LustreLogCreateConfiguration(input.LogConfiguration, context), + }), ...(input.PerUnitStorageThroughput !== undefined && input.PerUnitStorageThroughput !== null && { PerUnitStorageThroughput: input.PerUnitStorageThroughput }), ...(input.WeeklyMaintenanceStartTime !== undefined && @@ -3646,8 +4740,44 @@ const serializeAws_json1_1CreateFileSystemLustreConfiguration = ( }; }; -const serializeAws_json1_1CreateFileSystemOntapConfiguration = ( - input: CreateFileSystemOntapConfiguration, +const serializeAws_json1_1CreateFileSystemOntapConfiguration = ( + input: CreateFileSystemOntapConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.AutomaticBackupRetentionDays !== undefined && + input.AutomaticBackupRetentionDays !== null && { + AutomaticBackupRetentionDays: 
input.AutomaticBackupRetentionDays, + }), + ...(input.DailyAutomaticBackupStartTime !== undefined && + input.DailyAutomaticBackupStartTime !== null && { + DailyAutomaticBackupStartTime: input.DailyAutomaticBackupStartTime, + }), + ...(input.DeploymentType !== undefined && + input.DeploymentType !== null && { DeploymentType: input.DeploymentType }), + ...(input.DiskIopsConfiguration !== undefined && + input.DiskIopsConfiguration !== null && { + DiskIopsConfiguration: serializeAws_json1_1DiskIopsConfiguration(input.DiskIopsConfiguration, context), + }), + ...(input.EndpointIpAddressRange !== undefined && + input.EndpointIpAddressRange !== null && { EndpointIpAddressRange: input.EndpointIpAddressRange }), + ...(input.FsxAdminPassword !== undefined && + input.FsxAdminPassword !== null && { FsxAdminPassword: input.FsxAdminPassword }), + ...(input.PreferredSubnetId !== undefined && + input.PreferredSubnetId !== null && { PreferredSubnetId: input.PreferredSubnetId }), + ...(input.RouteTableIds !== undefined && + input.RouteTableIds !== null && { + RouteTableIds: serializeAws_json1_1RouteTableIds(input.RouteTableIds, context), + }), + ...(input.ThroughputCapacity !== undefined && + input.ThroughputCapacity !== null && { ThroughputCapacity: input.ThroughputCapacity }), + ...(input.WeeklyMaintenanceStartTime !== undefined && + input.WeeklyMaintenanceStartTime !== null && { WeeklyMaintenanceStartTime: input.WeeklyMaintenanceStartTime }), + }; +}; + +const serializeAws_json1_1CreateFileSystemOpenZFSConfiguration = ( + input: CreateFileSystemOpenZFSConfiguration, context: __SerdeContext ): any => { return { @@ -3655,6 +4785,10 @@ const serializeAws_json1_1CreateFileSystemOntapConfiguration = ( input.AutomaticBackupRetentionDays !== null && { AutomaticBackupRetentionDays: input.AutomaticBackupRetentionDays, }), + ...(input.CopyTagsToBackups !== undefined && + input.CopyTagsToBackups !== null && { CopyTagsToBackups: input.CopyTagsToBackups }), + ...(input.CopyTagsToVolumes !== undefined && + input.CopyTagsToVolumes !== null && { CopyTagsToVolumes: input.CopyTagsToVolumes }), ...(input.DailyAutomaticBackupStartTime !== undefined && input.DailyAutomaticBackupStartTime !== null && { DailyAutomaticBackupStartTime: input.DailyAutomaticBackupStartTime, @@ -3665,15 +4799,12 @@ const serializeAws_json1_1CreateFileSystemOntapConfiguration = ( input.DiskIopsConfiguration !== null && { DiskIopsConfiguration: serializeAws_json1_1DiskIopsConfiguration(input.DiskIopsConfiguration, context), }), - ...(input.EndpointIpAddressRange !== undefined && - input.EndpointIpAddressRange !== null && { EndpointIpAddressRange: input.EndpointIpAddressRange }), - ...(input.FsxAdminPassword !== undefined && - input.FsxAdminPassword !== null && { FsxAdminPassword: input.FsxAdminPassword }), - ...(input.PreferredSubnetId !== undefined && - input.PreferredSubnetId !== null && { PreferredSubnetId: input.PreferredSubnetId }), - ...(input.RouteTableIds !== undefined && - input.RouteTableIds !== null && { - RouteTableIds: serializeAws_json1_1RouteTableIds(input.RouteTableIds, context), + ...(input.RootVolumeConfiguration !== undefined && + input.RootVolumeConfiguration !== null && { + RootVolumeConfiguration: serializeAws_json1_1OpenZFSCreateRootVolumeConfiguration( + input.RootVolumeConfiguration, + context + ), }), ...(input.ThroughputCapacity !== undefined && input.ThroughputCapacity !== null && { ThroughputCapacity: input.ThroughputCapacity }), @@ -3701,6 +4832,13 @@ const serializeAws_json1_1CreateFileSystemRequest = (input: 
CreateFileSystemRequ input.OntapConfiguration !== null && { OntapConfiguration: serializeAws_json1_1CreateFileSystemOntapConfiguration(input.OntapConfiguration, context), }), + ...(input.OpenZFSConfiguration !== undefined && + input.OpenZFSConfiguration !== null && { + OpenZFSConfiguration: serializeAws_json1_1CreateFileSystemOpenZFSConfiguration( + input.OpenZFSConfiguration, + context + ), + }), ...(input.SecurityGroupIds !== undefined && input.SecurityGroupIds !== null && { SecurityGroupIds: serializeAws_json1_1SecurityGroupIds(input.SecurityGroupIds, context), @@ -3785,6 +4923,56 @@ const serializeAws_json1_1CreateOntapVolumeConfiguration = ( }; }; +const serializeAws_json1_1CreateOpenZFSOriginSnapshotConfiguration = ( + input: CreateOpenZFSOriginSnapshotConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.CopyStrategy !== undefined && input.CopyStrategy !== null && { CopyStrategy: input.CopyStrategy }), + ...(input.SnapshotARN !== undefined && input.SnapshotARN !== null && { SnapshotARN: input.SnapshotARN }), + }; +}; + +const serializeAws_json1_1CreateOpenZFSVolumeConfiguration = ( + input: CreateOpenZFSVolumeConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.CopyTagsToSnapshots !== undefined && + input.CopyTagsToSnapshots !== null && { CopyTagsToSnapshots: input.CopyTagsToSnapshots }), + ...(input.DataCompressionType !== undefined && + input.DataCompressionType !== null && { DataCompressionType: input.DataCompressionType }), + ...(input.NfsExports !== undefined && + input.NfsExports !== null && { NfsExports: serializeAws_json1_1OpenZFSNfsExports(input.NfsExports, context) }), + ...(input.OriginSnapshot !== undefined && + input.OriginSnapshot !== null && { + OriginSnapshot: serializeAws_json1_1CreateOpenZFSOriginSnapshotConfiguration(input.OriginSnapshot, context), + }), + ...(input.ParentVolumeId !== undefined && + input.ParentVolumeId !== null && { ParentVolumeId: input.ParentVolumeId }), + ...(input.ReadOnly !== undefined && input.ReadOnly !== null && { ReadOnly: input.ReadOnly }), + ...(input.StorageCapacityQuotaGiB !== undefined && + input.StorageCapacityQuotaGiB !== null && { StorageCapacityQuotaGiB: input.StorageCapacityQuotaGiB }), + ...(input.StorageCapacityReservationGiB !== undefined && + input.StorageCapacityReservationGiB !== null && { + StorageCapacityReservationGiB: input.StorageCapacityReservationGiB, + }), + ...(input.UserAndGroupQuotas !== undefined && + input.UserAndGroupQuotas !== null && { + UserAndGroupQuotas: serializeAws_json1_1OpenZFSUserAndGroupQuotas(input.UserAndGroupQuotas, context), + }), + }; +}; + +const serializeAws_json1_1CreateSnapshotRequest = (input: CreateSnapshotRequest, context: __SerdeContext): any => { + return { + ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_json1_1Tags(input.Tags, context) }), + ...(input.VolumeId !== undefined && input.VolumeId !== null && { VolumeId: input.VolumeId }), + }; +}; + const serializeAws_json1_1CreateStorageVirtualMachineRequest = ( input: CreateStorageVirtualMachineRequest, context: __SerdeContext @@ -3848,11 +5036,26 @@ const serializeAws_json1_1CreateVolumeRequest = (input: CreateVolumeRequest, con input.OntapConfiguration !== null && { OntapConfiguration: serializeAws_json1_1CreateOntapVolumeConfiguration(input.OntapConfiguration, context), }), + ...(input.OpenZFSConfiguration !== undefined && + input.OpenZFSConfiguration !== null && { + OpenZFSConfiguration: serializeAws_json1_1CreateOpenZFSVolumeConfiguration(input.OpenZFSConfiguration, context), + }), ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_json1_1Tags(input.Tags, context) }), ...(input.VolumeType !== undefined && input.VolumeType !== null && { VolumeType: input.VolumeType }), }; }; +const serializeAws_json1_1DataRepositoryAssociationIds = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_json1_1DataRepositoryTaskFilter = ( input: DataRepositoryTaskFilter, context: __SerdeContext @@ -3907,6 +5110,18 @@ const serializeAws_json1_1DeleteBackupRequest = (input: DeleteBackupRequest, con }; }; +const serializeAws_json1_1DeleteDataRepositoryAssociationRequest = ( + input: DeleteDataRepositoryAssociationRequest, + context: __SerdeContext +): any => { + return { + ...(input.AssociationId !== undefined && input.AssociationId !== null && { AssociationId: input.AssociationId }), + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.DeleteDataInFileSystem !== undefined && + input.DeleteDataInFileSystem !== null && { DeleteDataInFileSystem: input.DeleteDataInFileSystem }), + }; +}; + const serializeAws_json1_1DeleteFileSystemLustreConfiguration = ( input: DeleteFileSystemLustreConfiguration, context: __SerdeContext @@ -3919,6 +5134,18 @@ const serializeAws_json1_1DeleteFileSystemLustreConfiguration = ( }; }; +const serializeAws_json1_1DeleteFileSystemOpenZFSConfiguration = ( + input: DeleteFileSystemOpenZFSConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.FinalBackupTags !== undefined && + input.FinalBackupTags !== null && { FinalBackupTags: serializeAws_json1_1Tags(input.FinalBackupTags, context) }), + ...(input.SkipFinalBackup !== undefined && + input.SkipFinalBackup !== null && { SkipFinalBackup: input.SkipFinalBackup }), + }; +}; + const serializeAws_json1_1DeleteFileSystemRequest = (input: DeleteFileSystemRequest, context: __SerdeContext): any => { return { ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), @@ -3930,6 +5157,13 @@ const serializeAws_json1_1DeleteFileSystemRequest = (input: DeleteFileSystemRequ context ), }), + ...(input.OpenZFSConfiguration !== undefined && + input.OpenZFSConfiguration !== null && { + OpenZFSConfiguration: serializeAws_json1_1DeleteFileSystemOpenZFSConfiguration( + input.OpenZFSConfiguration, + context + ), + }), ...(input.WindowsConfiguration !== undefined && input.WindowsConfiguration !== null && { WindowsConfiguration: serializeAws_json1_1DeleteFileSystemWindowsConfiguration( @@ -3952,6 +5186,27 @@ const serializeAws_json1_1DeleteFileSystemWindowsConfiguration = ( }; }; +const serializeAws_json1_1DeleteOpenZFSVolumeOptions = ( + input: (DeleteOpenZFSVolumeOption | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_1DeleteSnapshotRequest = (input: DeleteSnapshotRequest, context: __SerdeContext): any => { + return { + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.SnapshotId !== undefined && input.SnapshotId !== null && { SnapshotId: input.SnapshotId }), + }; +}; + const serializeAws_json1_1DeleteStorageVirtualMachineRequest = ( input: DeleteStorageVirtualMachineRequest, context: __SerdeContext @@ -3975,6 +5230,16 @@ const serializeAws_json1_1DeleteVolumeOntapConfiguration = ( }; }; +const serializeAws_json1_1DeleteVolumeOpenZFSConfiguration = ( + input: DeleteVolumeOpenZFSConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.Options !== undefined && + input.Options !== null && { Options: serializeAws_json1_1DeleteOpenZFSVolumeOptions(input.Options, context) }), + }; +}; + const serializeAws_json1_1DeleteVolumeRequest = (input: DeleteVolumeRequest, context: __SerdeContext): any => { return { ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), @@ -3982,6 +5247,10 @@ const serializeAws_json1_1DeleteVolumeRequest = (input: DeleteVolumeRequest, con input.OntapConfiguration !== null && { OntapConfiguration: serializeAws_json1_1DeleteVolumeOntapConfiguration(input.OntapConfiguration, context), }), + ...(input.OpenZFSConfiguration !== undefined && + input.OpenZFSConfiguration !== null && { + OpenZFSConfiguration: serializeAws_json1_1DeleteVolumeOpenZFSConfiguration(input.OpenZFSConfiguration, context), + }), ...(input.VolumeId !== undefined && input.VolumeId !== null && { VolumeId: input.VolumeId }), }; }; @@ -3997,6 +5266,22 @@ const serializeAws_json1_1DescribeBackupsRequest = (input: DescribeBackupsReques }; }; +const serializeAws_json1_1DescribeDataRepositoryAssociationsRequest = ( + input: DescribeDataRepositoryAssociationsRequest, + context: __SerdeContext +): any => { + return { + ...(input.AssociationIds !== undefined && + input.AssociationIds !== null && { + AssociationIds: serializeAws_json1_1DataRepositoryAssociationIds(input.AssociationIds, context), + }), + ...(input.Filters !== undefined && + input.Filters !== null && { Filters: serializeAws_json1_1Filters(input.Filters, context) }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }; +}; + const serializeAws_json1_1DescribeDataRepositoryTasksRequest = ( input: DescribeDataRepositoryTasksRequest, context: __SerdeContext @@ -4011,79 +5296,230 @@ const serializeAws_json1_1DescribeDataRepositoryTasksRequest = ( }; }; -const serializeAws_json1_1DescribeFileSystemAliasesRequest = ( - input: DescribeFileSystemAliasesRequest, +const serializeAws_json1_1DescribeFileSystemAliasesRequest = ( + input: DescribeFileSystemAliasesRequest, + context: __SerdeContext +): any => { + return { + ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), + ...(input.FileSystemId !== undefined && input.FileSystemId !== null && { FileSystemId: input.FileSystemId }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }; +}; + +const serializeAws_json1_1DescribeFileSystemsRequest = ( + input: DescribeFileSystemsRequest, + context: __SerdeContext +): any => { + return { + ...(input.FileSystemIds !== undefined && + input.FileSystemIds !== null && { + FileSystemIds: serializeAws_json1_1FileSystemIds(input.FileSystemIds, context), + }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }; +}; + +const serializeAws_json1_1DescribeSnapshotsRequest = ( + input: DescribeSnapshotsRequest, + context: __SerdeContext +): any => { + return { + ...(input.Filters !== undefined && + input.Filters !== null && { Filters: serializeAws_json1_1SnapshotFilters(input.Filters, context) }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.SnapshotIds !== undefined && + input.SnapshotIds !== null && { SnapshotIds: serializeAws_json1_1SnapshotIds(input.SnapshotIds, context) }), + }; +}; + +const serializeAws_json1_1DescribeStorageVirtualMachinesRequest = ( + input: DescribeStorageVirtualMachinesRequest, + context: __SerdeContext +): any => { + return { + ...(input.Filters !== undefined && + input.Filters !== null && { Filters: serializeAws_json1_1StorageVirtualMachineFilters(input.Filters, context) }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.StorageVirtualMachineIds !== undefined && + input.StorageVirtualMachineIds !== null && { + StorageVirtualMachineIds: serializeAws_json1_1StorageVirtualMachineIds(input.StorageVirtualMachineIds, context), + }), + }; +}; + +const serializeAws_json1_1DescribeVolumesRequest = (input: DescribeVolumesRequest, context: __SerdeContext): any => { + return { + ...(input.Filters !== undefined && + input.Filters !== null && { Filters: serializeAws_json1_1VolumeFilters(input.Filters, context) }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.VolumeIds !== undefined && + input.VolumeIds !== null && { VolumeIds: serializeAws_json1_1VolumeIds(input.VolumeIds, context) }), + }; +}; + +const serializeAws_json1_1DisassociateFileSystemAliasesRequest = ( + input: DisassociateFileSystemAliasesRequest, + context: __SerdeContext +): any => { + return { + ...(input.Aliases !== undefined && + input.Aliases !== null && { Aliases: serializeAws_json1_1AlternateDNSNames(input.Aliases, context) }), + ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), + ...(input.FileSystemId !== undefined && input.FileSystemId !== null && { FileSystemId: input.FileSystemId }), + }; +}; + +const serializeAws_json1_1DiskIopsConfiguration = (input: DiskIopsConfiguration, context: __SerdeContext): any => { + return { + ...(input.Iops !== undefined && input.Iops !== null && { Iops: input.Iops }), + ...(input.Mode !== undefined && input.Mode !== null && { Mode: input.Mode }), + }; +}; + +const serializeAws_json1_1DnsIps = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_1EventTypes = (input: (EventType | string)[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_1FileSystemIds = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_1Filter = (input: Filter, context: __SerdeContext): any => { + return { + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.Values !== undefined && + input.Values !== null && { Values: serializeAws_json1_1FilterValues(input.Values, context) }), + }; +}; + +const serializeAws_json1_1Filters = (input: Filter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1Filter(entry, context); + }); +}; + +const serializeAws_json1_1FilterValues = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_1ListTagsForResourceRequest = ( + input: ListTagsForResourceRequest, context: __SerdeContext ): any => { return { - ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), - ...(input.FileSystemId !== undefined && input.FileSystemId !== null && { FileSystemId: input.FileSystemId }), ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.ResourceARN !== undefined && input.ResourceARN !== null && { ResourceARN: input.ResourceARN }), }; }; -const serializeAws_json1_1DescribeFileSystemsRequest = ( - input: DescribeFileSystemsRequest, +const serializeAws_json1_1LustreLogCreateConfiguration = ( + input: LustreLogCreateConfiguration, context: __SerdeContext ): any => { return { - ...(input.FileSystemIds !== undefined && - input.FileSystemIds !== null && { - FileSystemIds: serializeAws_json1_1FileSystemIds(input.FileSystemIds, context), - }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.Destination !== undefined && input.Destination !== null && { Destination: input.Destination }), + ...(input.Level !== undefined && input.Level !== null && { Level: input.Level }), }; }; -const serializeAws_json1_1DescribeStorageVirtualMachinesRequest = ( - input: DescribeStorageVirtualMachinesRequest, +const serializeAws_json1_1OpenZFSClientConfiguration = ( + input: OpenZFSClientConfiguration, context: __SerdeContext ): any => { return { - ...(input.Filters !== undefined && - input.Filters !== null && { Filters: serializeAws_json1_1StorageVirtualMachineFilters(input.Filters, context) }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - ...(input.StorageVirtualMachineIds !== undefined && - input.StorageVirtualMachineIds !== null && { - StorageVirtualMachineIds: serializeAws_json1_1StorageVirtualMachineIds(input.StorageVirtualMachineIds, context), - }), + ...(input.Clients !== undefined && input.Clients !== null && { Clients: input.Clients }), + ...(input.Options !== undefined && + input.Options !== null && { Options: serializeAws_json1_1OpenZFSNfsExportOptions(input.Options, context) }), }; }; -const serializeAws_json1_1DescribeVolumesRequest = (input: DescribeVolumesRequest, context: __SerdeContext): any => { - return { - ...(input.Filters !== undefined && - input.Filters !== null && { Filters: serializeAws_json1_1VolumeFilters(input.Filters, context) }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - ...(input.VolumeIds !== undefined && - input.VolumeIds !== null && { VolumeIds: serializeAws_json1_1VolumeIds(input.VolumeIds, context) }), - }; +const serializeAws_json1_1OpenZFSClientConfigurations = ( + input: OpenZFSClientConfiguration[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1OpenZFSClientConfiguration(entry, context); + }); }; -const serializeAws_json1_1DisassociateFileSystemAliasesRequest = ( - input: DisassociateFileSystemAliasesRequest, +const serializeAws_json1_1OpenZFSCreateRootVolumeConfiguration = ( + input: OpenZFSCreateRootVolumeConfiguration, context: __SerdeContext 
): any => { return { - ...(input.Aliases !== undefined && - input.Aliases !== null && { Aliases: serializeAws_json1_1AlternateDNSNames(input.Aliases, context) }), - ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), - ...(input.FileSystemId !== undefined && input.FileSystemId !== null && { FileSystemId: input.FileSystemId }), + ...(input.CopyTagsToSnapshots !== undefined && + input.CopyTagsToSnapshots !== null && { CopyTagsToSnapshots: input.CopyTagsToSnapshots }), + ...(input.DataCompressionType !== undefined && + input.DataCompressionType !== null && { DataCompressionType: input.DataCompressionType }), + ...(input.NfsExports !== undefined && + input.NfsExports !== null && { NfsExports: serializeAws_json1_1OpenZFSNfsExports(input.NfsExports, context) }), + ...(input.ReadOnly !== undefined && input.ReadOnly !== null && { ReadOnly: input.ReadOnly }), + ...(input.UserAndGroupQuotas !== undefined && + input.UserAndGroupQuotas !== null && { + UserAndGroupQuotas: serializeAws_json1_1OpenZFSUserAndGroupQuotas(input.UserAndGroupQuotas, context), + }), }; }; -const serializeAws_json1_1DiskIopsConfiguration = (input: DiskIopsConfiguration, context: __SerdeContext): any => { +const serializeAws_json1_1OpenZFSNfsExport = (input: OpenZFSNfsExport, context: __SerdeContext): any => { return { - ...(input.Iops !== undefined && input.Iops !== null && { Iops: input.Iops }), - ...(input.Mode !== undefined && input.Mode !== null && { Mode: input.Mode }), + ...(input.ClientConfigurations !== undefined && + input.ClientConfigurations !== null && { + ClientConfigurations: serializeAws_json1_1OpenZFSClientConfigurations(input.ClientConfigurations, context), + }), }; }; -const serializeAws_json1_1DnsIps = (input: string[], context: __SerdeContext): any => { +const serializeAws_json1_1OpenZFSNfsExportOptions = (input: string[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) .map((entry) => { @@ -4094,37 +5530,54 @@ const serializeAws_json1_1DnsIps = (input: string[], context: __SerdeContext): a }); }; -const serializeAws_json1_1FileSystemIds = (input: string[], context: __SerdeContext): any => { +const serializeAws_json1_1OpenZFSNfsExports = (input: OpenZFSNfsExport[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) .map((entry) => { if (entry === null) { return null as any; } - return entry; + return serializeAws_json1_1OpenZFSNfsExport(entry, context); }); }; -const serializeAws_json1_1Filter = (input: Filter, context: __SerdeContext): any => { - return { - ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), - ...(input.Values !== undefined && - input.Values !== null && { Values: serializeAws_json1_1FilterValues(input.Values, context) }), - }; -}; - -const serializeAws_json1_1Filters = (input: Filter[], context: __SerdeContext): any => { +const serializeAws_json1_1OpenZFSUserAndGroupQuotas = ( + input: OpenZFSUserOrGroupQuota[], + context: __SerdeContext +): any => { return input .filter((e: any) => e != null) .map((entry) => { if (entry === null) { return null as any; } - return serializeAws_json1_1Filter(entry, context); + return serializeAws_json1_1OpenZFSUserOrGroupQuota(entry, context); }); }; -const serializeAws_json1_1FilterValues = (input: string[], context: __SerdeContext): any => { +const serializeAws_json1_1OpenZFSUserOrGroupQuota = (input: OpenZFSUserOrGroupQuota, context: __SerdeContext): any => { + return { + ...(input.Id !== undefined && input.Id !== null && { Id: input.Id }), + 
...(input.StorageCapacityQuotaGiB !== undefined && + input.StorageCapacityQuotaGiB !== null && { StorageCapacityQuotaGiB: input.StorageCapacityQuotaGiB }), + ...(input.Type !== undefined && input.Type !== null && { Type: input.Type }), + }; +}; + +const serializeAws_json1_1ReleaseFileSystemNfsV3LocksRequest = ( + input: ReleaseFileSystemNfsV3LocksRequest, + context: __SerdeContext +): any => { + return { + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.FileSystemId !== undefined && input.FileSystemId !== null && { FileSystemId: input.FileSystemId }), + }; +}; + +const serializeAws_json1_1RestoreOpenZFSVolumeOptions = ( + input: (RestoreOpenZFSVolumeOption | string)[], + context: __SerdeContext +): any => { return input .filter((e: any) => e != null) .map((entry) => { @@ -4135,14 +5588,16 @@ const serializeAws_json1_1FilterValues = (input: string[], context: __SerdeConte }); }; -const serializeAws_json1_1ListTagsForResourceRequest = ( - input: ListTagsForResourceRequest, +const serializeAws_json1_1RestoreVolumeFromSnapshotRequest = ( + input: RestoreVolumeFromSnapshotRequest, context: __SerdeContext ): any => { return { - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - ...(input.ResourceARN !== undefined && input.ResourceARN !== null && { ResourceARN: input.ResourceARN }), + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.Options !== undefined && + input.Options !== null && { Options: serializeAws_json1_1RestoreOpenZFSVolumeOptions(input.Options, context) }), + ...(input.SnapshotId !== undefined && input.SnapshotId !== null && { SnapshotId: input.SnapshotId }), + ...(input.VolumeId !== undefined && input.VolumeId !== null && { VolumeId: input.VolumeId }), }; }; @@ -4157,6 +5612,22 @@ const serializeAws_json1_1RouteTableIds = (input: string[], context: __SerdeCont }); }; +const serializeAws_json1_1S3DataRepositoryConfiguration = ( + input: S3DataRepositoryConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.AutoExportPolicy !== undefined && + input.AutoExportPolicy !== null && { + AutoExportPolicy: serializeAws_json1_1AutoExportPolicy(input.AutoExportPolicy, context), + }), + ...(input.AutoImportPolicy !== undefined && + input.AutoImportPolicy !== null && { + AutoImportPolicy: serializeAws_json1_1AutoImportPolicy(input.AutoImportPolicy, context), + }), + }; +}; + const serializeAws_json1_1SecurityGroupIds = (input: string[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -4201,6 +5672,47 @@ const serializeAws_json1_1SelfManagedActiveDirectoryConfigurationUpdates = ( }; }; +const serializeAws_json1_1SnapshotFilter = (input: SnapshotFilter, context: __SerdeContext): any => { + return { + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.Values !== undefined && + input.Values !== null && { Values: serializeAws_json1_1SnapshotFilterValues(input.Values, context) }), + }; +}; + +const serializeAws_json1_1SnapshotFilters = (input: SnapshotFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_json1_1SnapshotFilter(entry, context); + }); +}; + +const serializeAws_json1_1SnapshotFilterValues = (input: string[], context: __SerdeContext): any => 
{ + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_json1_1SnapshotIds = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + const serializeAws_json1_1StorageVirtualMachineFilter = ( input: StorageVirtualMachineFilter, context: __SerdeContext @@ -4323,6 +5835,20 @@ const serializeAws_json1_1UntagResourceRequest = (input: UntagResourceRequest, c }; }; +const serializeAws_json1_1UpdateDataRepositoryAssociationRequest = ( + input: UpdateDataRepositoryAssociationRequest, + context: __SerdeContext +): any => { + return { + ...(input.AssociationId !== undefined && input.AssociationId !== null && { AssociationId: input.AssociationId }), + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.ImportedFileChunkSize !== undefined && + input.ImportedFileChunkSize !== null && { ImportedFileChunkSize: input.ImportedFileChunkSize }), + ...(input.S3 !== undefined && + input.S3 !== null && { S3: serializeAws_json1_1S3DataRepositoryConfiguration(input.S3, context) }), + }; +}; + const serializeAws_json1_1UpdateFileSystemLustreConfiguration = ( input: UpdateFileSystemLustreConfiguration, context: __SerdeContext @@ -4340,6 +5866,10 @@ const serializeAws_json1_1UpdateFileSystemLustreConfiguration = ( }), ...(input.DataCompressionType !== undefined && input.DataCompressionType !== null && { DataCompressionType: input.DataCompressionType }), + ...(input.LogConfiguration !== undefined && + input.LogConfiguration !== null && { + LogConfiguration: serializeAws_json1_1LustreLogCreateConfiguration(input.LogConfiguration, context), + }), ...(input.WeeklyMaintenanceStartTime !== undefined && input.WeeklyMaintenanceStartTime !== null && { WeeklyMaintenanceStartTime: input.WeeklyMaintenanceStartTime }), }; @@ -4365,6 +5895,34 @@ const serializeAws_json1_1UpdateFileSystemOntapConfiguration = ( }; }; +const serializeAws_json1_1UpdateFileSystemOpenZFSConfiguration = ( + input: UpdateFileSystemOpenZFSConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.AutomaticBackupRetentionDays !== undefined && + input.AutomaticBackupRetentionDays !== null && { + AutomaticBackupRetentionDays: input.AutomaticBackupRetentionDays, + }), + ...(input.CopyTagsToBackups !== undefined && + input.CopyTagsToBackups !== null && { CopyTagsToBackups: input.CopyTagsToBackups }), + ...(input.CopyTagsToVolumes !== undefined && + input.CopyTagsToVolumes !== null && { CopyTagsToVolumes: input.CopyTagsToVolumes }), + ...(input.DailyAutomaticBackupStartTime !== undefined && + input.DailyAutomaticBackupStartTime !== null && { + DailyAutomaticBackupStartTime: input.DailyAutomaticBackupStartTime, + }), + ...(input.DiskIopsConfiguration !== undefined && + input.DiskIopsConfiguration !== null && { + DiskIopsConfiguration: serializeAws_json1_1DiskIopsConfiguration(input.DiskIopsConfiguration, context), + }), + ...(input.ThroughputCapacity !== undefined && + input.ThroughputCapacity !== null && { ThroughputCapacity: input.ThroughputCapacity }), + ...(input.WeeklyMaintenanceStartTime !== undefined && + input.WeeklyMaintenanceStartTime !== null && { WeeklyMaintenanceStartTime: input.WeeklyMaintenanceStartTime }), + }; +}; + const serializeAws_json1_1UpdateFileSystemRequest = (input: UpdateFileSystemRequest, context: __SerdeContext): any => { return { 
ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), @@ -4380,6 +5938,13 @@ const serializeAws_json1_1UpdateFileSystemRequest = (input: UpdateFileSystemRequ input.OntapConfiguration !== null && { OntapConfiguration: serializeAws_json1_1UpdateFileSystemOntapConfiguration(input.OntapConfiguration, context), }), + ...(input.OpenZFSConfiguration !== undefined && + input.OpenZFSConfiguration !== null && { + OpenZFSConfiguration: serializeAws_json1_1UpdateFileSystemOpenZFSConfiguration( + input.OpenZFSConfiguration, + context + ), + }), ...(input.StorageCapacity !== undefined && input.StorageCapacity !== null && { StorageCapacity: input.StorageCapacity }), ...(input.WindowsConfiguration !== undefined && @@ -4444,6 +6009,37 @@ const serializeAws_json1_1UpdateOntapVolumeConfiguration = ( }; }; +const serializeAws_json1_1UpdateOpenZFSVolumeConfiguration = ( + input: UpdateOpenZFSVolumeConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.DataCompressionType !== undefined && + input.DataCompressionType !== null && { DataCompressionType: input.DataCompressionType }), + ...(input.NfsExports !== undefined && + input.NfsExports !== null && { NfsExports: serializeAws_json1_1OpenZFSNfsExports(input.NfsExports, context) }), + ...(input.ReadOnly !== undefined && input.ReadOnly !== null && { ReadOnly: input.ReadOnly }), + ...(input.StorageCapacityQuotaGiB !== undefined && + input.StorageCapacityQuotaGiB !== null && { StorageCapacityQuotaGiB: input.StorageCapacityQuotaGiB }), + ...(input.StorageCapacityReservationGiB !== undefined && + input.StorageCapacityReservationGiB !== null && { + StorageCapacityReservationGiB: input.StorageCapacityReservationGiB, + }), + ...(input.UserAndGroupQuotas !== undefined && + input.UserAndGroupQuotas !== null && { + UserAndGroupQuotas: serializeAws_json1_1OpenZFSUserAndGroupQuotas(input.UserAndGroupQuotas, context), + }), + }; +}; + +const serializeAws_json1_1UpdateSnapshotRequest = (input: UpdateSnapshotRequest, context: __SerdeContext): any => { + return { + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.SnapshotId !== undefined && input.SnapshotId !== null && { SnapshotId: input.SnapshotId }), + }; +}; + const serializeAws_json1_1UpdateStorageVirtualMachineRequest = ( input: UpdateStorageVirtualMachineRequest, context: __SerdeContext @@ -4482,10 +6078,15 @@ const serializeAws_json1_1UpdateSvmActiveDirectoryConfiguration = ( const serializeAws_json1_1UpdateVolumeRequest = (input: UpdateVolumeRequest, context: __SerdeContext): any => { return { ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), ...(input.OntapConfiguration !== undefined && input.OntapConfiguration !== null && { OntapConfiguration: serializeAws_json1_1UpdateOntapVolumeConfiguration(input.OntapConfiguration, context), }), + ...(input.OpenZFSConfiguration !== undefined && + input.OpenZFSConfiguration !== null && { + OpenZFSConfiguration: serializeAws_json1_1UpdateOpenZFSVolumeConfiguration(input.OpenZFSConfiguration, context), + }), ...(input.VolumeId !== undefined && input.VolumeId !== null && { VolumeId: input.VolumeId }), }; }; @@ -4583,6 +6184,10 @@ const deserializeAws_json1_1AdministrativeAction = (output: any, context: __Serd output.TargetFileSystemValues !== undefined && output.TargetFileSystemValues !== null ? 
deserializeAws_json1_1FileSystem(output.TargetFileSystemValues, context) : undefined, + TargetSnapshotValues: + output.TargetSnapshotValues !== undefined && output.TargetSnapshotValues !== null + ? deserializeAws_json1_1Snapshot(output.TargetSnapshotValues, context) + : undefined, TargetVolumeValues: output.TargetVolumeValues !== undefined && output.TargetVolumeValues !== null ? deserializeAws_json1_1Volume(output.TargetVolumeValues, context) @@ -4640,6 +6245,24 @@ const deserializeAws_json1_1AssociateFileSystemAliasesResponse = ( } as any; }; +const deserializeAws_json1_1AutoExportPolicy = (output: any, context: __SerdeContext): AutoExportPolicy => { + return { + Events: + output.Events !== undefined && output.Events !== null + ? deserializeAws_json1_1EventTypes(output.Events, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1AutoImportPolicy = (output: any, context: __SerdeContext): AutoImportPolicy => { + return { + Events: + output.Events !== undefined && output.Events !== null + ? deserializeAws_json1_1EventTypes(output.Events, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1Backup = (output: any, context: __SerdeContext): Backup => { return { BackupId: __expectString(output.BackupId), @@ -4763,6 +6386,18 @@ const deserializeAws_json1_1CreateBackupResponse = (output: any, context: __Serd } as any; }; +const deserializeAws_json1_1CreateDataRepositoryAssociationResponse = ( + output: any, + context: __SerdeContext +): CreateDataRepositoryAssociationResponse => { + return { + Association: + output.Association !== undefined && output.Association !== null + ? deserializeAws_json1_1DataRepositoryAssociation(output.Association, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1CreateDataRepositoryTaskResponse = ( output: any, context: __SerdeContext @@ -4799,6 +6434,15 @@ const deserializeAws_json1_1CreateFileSystemResponse = ( } as any; }; +const deserializeAws_json1_1CreateSnapshotResponse = (output: any, context: __SerdeContext): CreateSnapshotResponse => { + return { + Snapshot: + output.Snapshot !== undefined && output.Snapshot !== null + ? deserializeAws_json1_1Snapshot(output.Snapshot, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1CreateStorageVirtualMachineResponse = ( output: any, context: __SerdeContext @@ -4811,25 +6455,78 @@ const deserializeAws_json1_1CreateStorageVirtualMachineResponse = ( } as any; }; -const deserializeAws_json1_1CreateVolumeFromBackupResponse = ( +const deserializeAws_json1_1CreateVolumeFromBackupResponse = ( + output: any, + context: __SerdeContext +): CreateVolumeFromBackupResponse => { + return { + Volume: + output.Volume !== undefined && output.Volume !== null + ? deserializeAws_json1_1Volume(output.Volume, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1CreateVolumeResponse = (output: any, context: __SerdeContext): CreateVolumeResponse => { + return { + Volume: + output.Volume !== undefined && output.Volume !== null + ? deserializeAws_json1_1Volume(output.Volume, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1DataRepositoryAssociation = ( + output: any, + context: __SerdeContext +): DataRepositoryAssociation => { + return { + AssociationId: __expectString(output.AssociationId), + BatchImportMetaDataOnCreate: __expectBoolean(output.BatchImportMetaDataOnCreate), + CreationTime: + output.CreationTime !== undefined && output.CreationTime !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) + : undefined, + DataRepositoryPath: __expectString(output.DataRepositoryPath), + FailureDetails: + output.FailureDetails !== undefined && output.FailureDetails !== null + ? deserializeAws_json1_1DataRepositoryFailureDetails(output.FailureDetails, context) + : undefined, + FileSystemId: __expectString(output.FileSystemId), + FileSystemPath: __expectString(output.FileSystemPath), + ImportedFileChunkSize: __expectInt32(output.ImportedFileChunkSize), + Lifecycle: __expectString(output.Lifecycle), + ResourceARN: __expectString(output.ResourceARN), + S3: + output.S3 !== undefined && output.S3 !== null + ? deserializeAws_json1_1S3DataRepositoryConfiguration(output.S3, context) + : undefined, + Tags: + output.Tags !== undefined && output.Tags !== null ? deserializeAws_json1_1Tags(output.Tags, context) : undefined, + } as any; +}; + +const deserializeAws_json1_1DataRepositoryAssociationNotFound = ( output: any, context: __SerdeContext -): CreateVolumeFromBackupResponse => { +): DataRepositoryAssociationNotFound => { return { - Volume: - output.Volume !== undefined && output.Volume !== null - ? deserializeAws_json1_1Volume(output.Volume, context) - : undefined, + Message: __expectString(output.Message), } as any; }; -const deserializeAws_json1_1CreateVolumeResponse = (output: any, context: __SerdeContext): CreateVolumeResponse => { - return { - Volume: - output.Volume !== undefined && output.Volume !== null - ? deserializeAws_json1_1Volume(output.Volume, context) - : undefined, - } as any; +const deserializeAws_json1_1DataRepositoryAssociations = ( + output: any, + context: __SerdeContext +): DataRepositoryAssociation[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1DataRepositoryAssociation(entry, context); + }); }; const deserializeAws_json1_1DataRepositoryConfiguration = ( @@ -4978,6 +6675,17 @@ const deserializeAws_json1_1DeleteBackupResponse = (output: any, context: __Serd } as any; }; +const deserializeAws_json1_1DeleteDataRepositoryAssociationResponse = ( + output: any, + context: __SerdeContext +): DeleteDataRepositoryAssociationResponse => { + return { + AssociationId: __expectString(output.AssociationId), + DeleteDataInFileSystem: __expectBoolean(output.DeleteDataInFileSystem), + Lifecycle: __expectString(output.Lifecycle), + } as any; +}; + const deserializeAws_json1_1DeleteFileSystemLustreResponse = ( output: any, context: __SerdeContext @@ -4991,6 +6699,19 @@ const deserializeAws_json1_1DeleteFileSystemLustreResponse = ( } as any; }; +const deserializeAws_json1_1DeleteFileSystemOpenZFSResponse = ( + output: any, + context: __SerdeContext +): DeleteFileSystemOpenZFSResponse => { + return { + FinalBackupId: __expectString(output.FinalBackupId), + FinalBackupTags: + output.FinalBackupTags !== undefined && output.FinalBackupTags !== null + ? deserializeAws_json1_1Tags(output.FinalBackupTags, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1DeleteFileSystemResponse = ( output: any, context: __SerdeContext @@ -5002,6 +6723,10 @@ const deserializeAws_json1_1DeleteFileSystemResponse = ( output.LustreResponse !== undefined && output.LustreResponse !== null ? deserializeAws_json1_1DeleteFileSystemLustreResponse(output.LustreResponse, context) : undefined, + OpenZFSResponse: + output.OpenZFSResponse !== undefined && output.OpenZFSResponse !== null + ? 
deserializeAws_json1_1DeleteFileSystemOpenZFSResponse(output.OpenZFSResponse, context) + : undefined, WindowsResponse: output.WindowsResponse !== undefined && output.WindowsResponse !== null ? deserializeAws_json1_1DeleteFileSystemWindowsResponse(output.WindowsResponse, context) @@ -5022,6 +6747,13 @@ const deserializeAws_json1_1DeleteFileSystemWindowsResponse = ( } as any; }; +const deserializeAws_json1_1DeleteSnapshotResponse = (output: any, context: __SerdeContext): DeleteSnapshotResponse => { + return { + Lifecycle: __expectString(output.Lifecycle), + SnapshotId: __expectString(output.SnapshotId), + } as any; +}; + const deserializeAws_json1_1DeleteStorageVirtualMachineResponse = ( output: any, context: __SerdeContext @@ -5069,6 +6801,19 @@ const deserializeAws_json1_1DescribeBackupsResponse = ( } as any; }; +const deserializeAws_json1_1DescribeDataRepositoryAssociationsResponse = ( + output: any, + context: __SerdeContext +): DescribeDataRepositoryAssociationsResponse => { + return { + Associations: + output.Associations !== undefined && output.Associations !== null + ? deserializeAws_json1_1DataRepositoryAssociations(output.Associations, context) + : undefined, + NextToken: __expectString(output.NextToken), + } as any; +}; + const deserializeAws_json1_1DescribeDataRepositoryTasksResponse = ( output: any, context: __SerdeContext @@ -5108,6 +6853,19 @@ const deserializeAws_json1_1DescribeFileSystemsResponse = ( } as any; }; +const deserializeAws_json1_1DescribeSnapshotsResponse = ( + output: any, + context: __SerdeContext +): DescribeSnapshotsResponse => { + return { + NextToken: __expectString(output.NextToken), + Snapshots: + output.Snapshots !== undefined && output.Snapshots !== null + ? deserializeAws_json1_1Snapshots(output.Snapshots, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1DescribeStorageVirtualMachinesResponse = ( output: any, context: __SerdeContext @@ -5164,6 +6922,17 @@ const deserializeAws_json1_1DnsIps = (output: any, context: __SerdeContext): str }); }; +const deserializeAws_json1_1EventTypes = (output: any, context: __SerdeContext): (EventType | string)[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + const deserializeAws_json1_1FileSystem = (output: any, context: __SerdeContext): FileSystem => { return { AdministrativeActions: @@ -5196,6 +6965,10 @@ const deserializeAws_json1_1FileSystem = (output: any, context: __SerdeContext): output.OntapConfiguration !== undefined && output.OntapConfiguration !== null ? deserializeAws_json1_1OntapFileSystemConfiguration(output.OntapConfiguration, context) : undefined, + OpenZFSConfiguration: + output.OpenZFSConfiguration !== undefined && output.OpenZFSConfiguration !== null + ? 
deserializeAws_json1_1OpenZFSFileSystemConfiguration(output.OpenZFSConfiguration, context) + : undefined, OwnerId: __expectString(output.OwnerId), ResourceARN: __expectString(output.ResourceARN), StorageCapacity: __expectInt32(output.StorageCapacity), @@ -5302,6 +7075,15 @@ const deserializeAws_json1_1InternalServerError = (output: any, context: __Serde } as any; }; +const deserializeAws_json1_1InvalidDataRepositoryType = ( + output: any, + context: __SerdeContext +): InvalidDataRepositoryType => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1InvalidDestinationKmsKey = ( output: any, context: __SerdeContext @@ -5388,12 +7170,23 @@ const deserializeAws_json1_1LustreFileSystemConfiguration = ( : undefined, DeploymentType: __expectString(output.DeploymentType), DriveCacheType: __expectString(output.DriveCacheType), + LogConfiguration: + output.LogConfiguration !== undefined && output.LogConfiguration !== null + ? deserializeAws_json1_1LustreLogConfiguration(output.LogConfiguration, context) + : undefined, MountName: __expectString(output.MountName), PerUnitStorageThroughput: __expectInt32(output.PerUnitStorageThroughput), WeeklyMaintenanceStartTime: __expectString(output.WeeklyMaintenanceStartTime), } as any; }; +const deserializeAws_json1_1LustreLogConfiguration = (output: any, context: __SerdeContext): LustreLogConfiguration => { + return { + Destination: __expectString(output.Destination), + Level: __expectString(output.Level), + } as any; +}; + const deserializeAws_json1_1MissingFileSystemConfiguration = ( output: any, context: __SerdeContext @@ -5492,6 +7285,158 @@ const deserializeAws_json1_1OntapVolumeConfiguration = ( } as any; }; +const deserializeAws_json1_1OpenZFSClientConfiguration = ( + output: any, + context: __SerdeContext +): OpenZFSClientConfiguration => { + return { + Clients: __expectString(output.Clients), + Options: + output.Options !== undefined && output.Options !== null + ? deserializeAws_json1_1OpenZFSNfsExportOptions(output.Options, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1OpenZFSClientConfigurations = ( + output: any, + context: __SerdeContext +): OpenZFSClientConfiguration[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1OpenZFSClientConfiguration(entry, context); + }); +}; + +const deserializeAws_json1_1OpenZFSFileSystemConfiguration = ( + output: any, + context: __SerdeContext +): OpenZFSFileSystemConfiguration => { + return { + AutomaticBackupRetentionDays: __expectInt32(output.AutomaticBackupRetentionDays), + CopyTagsToBackups: __expectBoolean(output.CopyTagsToBackups), + CopyTagsToVolumes: __expectBoolean(output.CopyTagsToVolumes), + DailyAutomaticBackupStartTime: __expectString(output.DailyAutomaticBackupStartTime), + DeploymentType: __expectString(output.DeploymentType), + DiskIopsConfiguration: + output.DiskIopsConfiguration !== undefined && output.DiskIopsConfiguration !== null + ? 
deserializeAws_json1_1DiskIopsConfiguration(output.DiskIopsConfiguration, context) + : undefined, + RootVolumeId: __expectString(output.RootVolumeId), + ThroughputCapacity: __expectInt32(output.ThroughputCapacity), + WeeklyMaintenanceStartTime: __expectString(output.WeeklyMaintenanceStartTime), + } as any; +}; + +const deserializeAws_json1_1OpenZFSNfsExport = (output: any, context: __SerdeContext): OpenZFSNfsExport => { + return { + ClientConfigurations: + output.ClientConfigurations !== undefined && output.ClientConfigurations !== null + ? deserializeAws_json1_1OpenZFSClientConfigurations(output.ClientConfigurations, context) + : undefined, + } as any; +}; + +const deserializeAws_json1_1OpenZFSNfsExportOptions = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_json1_1OpenZFSNfsExports = (output: any, context: __SerdeContext): OpenZFSNfsExport[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1OpenZFSNfsExport(entry, context); + }); +}; + +const deserializeAws_json1_1OpenZFSOriginSnapshotConfiguration = ( + output: any, + context: __SerdeContext +): OpenZFSOriginSnapshotConfiguration => { + return { + CopyStrategy: __expectString(output.CopyStrategy), + SnapshotARN: __expectString(output.SnapshotARN), + } as any; +}; + +const deserializeAws_json1_1OpenZFSUserAndGroupQuotas = ( + output: any, + context: __SerdeContext +): OpenZFSUserOrGroupQuota[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1OpenZFSUserOrGroupQuota(entry, context); + }); +}; + +const deserializeAws_json1_1OpenZFSUserOrGroupQuota = ( + output: any, + context: __SerdeContext +): OpenZFSUserOrGroupQuota => { + return { + Id: __expectInt32(output.Id), + StorageCapacityQuotaGiB: __expectInt32(output.StorageCapacityQuotaGiB), + Type: __expectString(output.Type), + } as any; +}; + +const deserializeAws_json1_1OpenZFSVolumeConfiguration = ( + output: any, + context: __SerdeContext +): OpenZFSVolumeConfiguration => { + return { + CopyTagsToSnapshots: __expectBoolean(output.CopyTagsToSnapshots), + DataCompressionType: __expectString(output.DataCompressionType), + NfsExports: + output.NfsExports !== undefined && output.NfsExports !== null + ? deserializeAws_json1_1OpenZFSNfsExports(output.NfsExports, context) + : undefined, + OriginSnapshot: + output.OriginSnapshot !== undefined && output.OriginSnapshot !== null + ? deserializeAws_json1_1OpenZFSOriginSnapshotConfiguration(output.OriginSnapshot, context) + : undefined, + ParentVolumeId: __expectString(output.ParentVolumeId), + ReadOnly: __expectBoolean(output.ReadOnly), + StorageCapacityQuotaGiB: __expectInt32(output.StorageCapacityQuotaGiB), + StorageCapacityReservationGiB: __expectInt32(output.StorageCapacityReservationGiB), + UserAndGroupQuotas: + output.UserAndGroupQuotas !== undefined && output.UserAndGroupQuotas !== null + ? 
deserializeAws_json1_1OpenZFSUserAndGroupQuotas(output.UserAndGroupQuotas, context) + : undefined, + VolumePath: __expectString(output.VolumePath), + } as any; +}; + +const deserializeAws_json1_1ReleaseFileSystemNfsV3LocksResponse = ( + output: any, + context: __SerdeContext +): ReleaseFileSystemNfsV3LocksResponse => { + return { + FileSystem: + output.FileSystem !== undefined && output.FileSystem !== null + ? deserializeAws_json1_1FileSystem(output.FileSystem, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1ResourceDoesNotSupportTagging = ( output: any, context: __SerdeContext @@ -5509,6 +7454,16 @@ const deserializeAws_json1_1ResourceNotFound = (output: any, context: __SerdeCon } as any; }; +const deserializeAws_json1_1RestoreVolumeFromSnapshotResponse = ( + output: any, + context: __SerdeContext +): RestoreVolumeFromSnapshotResponse => { + return { + Lifecycle: __expectString(output.Lifecycle), + VolumeId: __expectString(output.VolumeId), + } as any; +}; + const deserializeAws_json1_1RouteTableIds = (output: any, context: __SerdeContext): string[] => { return (output || []) .filter((e: any) => e != null) @@ -5520,6 +7475,22 @@ const deserializeAws_json1_1RouteTableIds = (output: any, context: __SerdeContex }); }; +const deserializeAws_json1_1S3DataRepositoryConfiguration = ( + output: any, + context: __SerdeContext +): S3DataRepositoryConfiguration => { + return { + AutoExportPolicy: + output.AutoExportPolicy !== undefined && output.AutoExportPolicy !== null + ? deserializeAws_json1_1AutoExportPolicy(output.AutoExportPolicy, context) + : undefined, + AutoImportPolicy: + output.AutoImportPolicy !== undefined && output.AutoImportPolicy !== null + ? deserializeAws_json1_1AutoImportPolicy(output.AutoImportPolicy, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1SelfManagedActiveDirectoryAttributes = ( output: any, context: __SerdeContext @@ -5543,6 +7514,43 @@ const deserializeAws_json1_1ServiceLimitExceeded = (output: any, context: __Serd } as any; }; +const deserializeAws_json1_1Snapshot = (output: any, context: __SerdeContext): Snapshot => { + return { + AdministrativeActions: + output.AdministrativeActions !== undefined && output.AdministrativeActions !== null + ? deserializeAws_json1_1AdministrativeActions(output.AdministrativeActions, context) + : undefined, + CreationTime: + output.CreationTime !== undefined && output.CreationTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) + : undefined, + Lifecycle: __expectString(output.Lifecycle), + Name: __expectString(output.Name), + ResourceARN: __expectString(output.ResourceARN), + SnapshotId: __expectString(output.SnapshotId), + Tags: + output.Tags !== undefined && output.Tags !== null ? 
deserializeAws_json1_1Tags(output.Tags, context) : undefined, + VolumeId: __expectString(output.VolumeId), + } as any; +}; + +const deserializeAws_json1_1SnapshotNotFound = (output: any, context: __SerdeContext): SnapshotNotFound => { + return { + Message: __expectString(output.Message), + } as any; +}; + +const deserializeAws_json1_1Snapshots = (output: any, context: __SerdeContext): Snapshot[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_json1_1Snapshot(entry, context); + }); +}; + const deserializeAws_json1_1SourceBackupUnavailable = ( output: any, context: __SerdeContext @@ -5705,6 +7713,18 @@ const deserializeAws_json1_1UntagResourceResponse = (output: any, context: __Ser return {} as any; }; +const deserializeAws_json1_1UpdateDataRepositoryAssociationResponse = ( + output: any, + context: __SerdeContext +): UpdateDataRepositoryAssociationResponse => { + return { + Association: + output.Association !== undefined && output.Association !== null + ? deserializeAws_json1_1DataRepositoryAssociation(output.Association, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1UpdateFileSystemResponse = ( output: any, context: __SerdeContext @@ -5717,6 +7737,15 @@ const deserializeAws_json1_1UpdateFileSystemResponse = ( } as any; }; +const deserializeAws_json1_1UpdateSnapshotResponse = (output: any, context: __SerdeContext): UpdateSnapshotResponse => { + return { + Snapshot: + output.Snapshot !== undefined && output.Snapshot !== null + ? deserializeAws_json1_1Snapshot(output.Snapshot, context) + : undefined, + } as any; +}; + const deserializeAws_json1_1UpdateStorageVirtualMachineResponse = ( output: any, context: __SerdeContext @@ -5740,6 +7769,10 @@ const deserializeAws_json1_1UpdateVolumeResponse = (output: any, context: __Serd const deserializeAws_json1_1Volume = (output: any, context: __SerdeContext): Volume => { return { + AdministrativeActions: + output.AdministrativeActions !== undefined && output.AdministrativeActions !== null + ? deserializeAws_json1_1AdministrativeActions(output.AdministrativeActions, context) + : undefined, CreationTime: output.CreationTime !== undefined && output.CreationTime !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreationTime))) @@ -5755,6 +7788,10 @@ const deserializeAws_json1_1Volume = (output: any, context: __SerdeContext): Vol output.OntapConfiguration !== undefined && output.OntapConfiguration !== null ? deserializeAws_json1_1OntapVolumeConfiguration(output.OntapConfiguration, context) : undefined, + OpenZFSConfiguration: + output.OpenZFSConfiguration !== undefined && output.OpenZFSConfiguration !== null + ? deserializeAws_json1_1OpenZFSVolumeConfiguration(output.OpenZFSConfiguration, context) + : undefined, ResourceARN: __expectString(output.ResourceARN), Tags: output.Tags !== undefined && output.Tags !== null ? 
deserializeAws_json1_1Tags(output.Tags, context) : undefined, diff --git a/clients/client-glue/src/commands/GetDatabaseCommand.ts b/clients/client-glue/src/commands/GetDatabaseCommand.ts index a0c2ceada77a..687b933904c0 100644 --- a/clients/client-glue/src/commands/GetDatabaseCommand.ts +++ b/clients/client-glue/src/commands/GetDatabaseCommand.ts @@ -12,8 +12,7 @@ import { } from "@aws-sdk/types"; import { GlueClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../GlueClient"; -import { GetDatabaseRequest } from "../models/models_0"; -import { GetDatabaseResponse } from "../models/models_1"; +import { GetDatabaseRequest, GetDatabaseResponse } from "../models/models_1"; import { deserializeAws_json1_1GetDatabaseCommand, serializeAws_json1_1GetDatabaseCommand, diff --git a/clients/client-glue/src/models/models_0.ts b/clients/client-glue/src/models/models_0.ts index e0577a87ec94..b6605d099bab 100644 --- a/clients/client-glue/src/models/models_0.ts +++ b/clients/client-glue/src/models/models_0.ts @@ -779,6 +779,11 @@ export interface BatchDeleteTableRequest { *

A list of the table to delete.
                                  */ TablesToDelete: string[] | undefined; + + /** + *

The transaction ID at which to delete the table contents.
                                  + */ + TransactionId?: string; } export namespace BatchDeleteTableRequest { @@ -830,6 +835,27 @@ export namespace BatchDeleteTableResponse { }); } +/** + *

A resource was not ready for a transaction.
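As an illustration only, a caller might surface this error as in the following sketch (assumes GlueClient and DeleteTableCommand from @aws-sdk/client-glue; the database, table, and transaction ID are placeholders, and top-level await is used for brevity):
  import { GlueClient, DeleteTableCommand } from "@aws-sdk/client-glue";

  const glue = new GlueClient({ region: "us-east-1" });
  const txId = "example-transaction-id"; // placeholder governed-table transaction ID
  try {
    await glue.send(new DeleteTableCommand({ DatabaseName: "sales", Name: "orders", TransactionId: txId }));
  } catch (err) {
    if ((err as Error).name === "ResourceNotReadyException") {
      // the resource is not yet ready for the transaction; back off and retry
    }
  }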
                                  + */ +export interface ResourceNotReadyException extends __SmithyException, $MetadataBearer { + name: "ResourceNotReadyException"; + $fault: "client"; + /** + *

A message describing the problem.
                                  + */ + Message?: string; +} + +export namespace ResourceNotReadyException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotReadyException): any => ({ + ...obj, + }); +} + export interface BatchDeleteTableVersionRequest { /** *

                                  The ID of the Data Catalog where the tables reside. If none is provided, the Amazon Web Services account @@ -2262,6 +2288,27 @@ export namespace BatchGetPartitionResponse { }); } +/** + *

An error that indicates your data is in an invalid state.
                                  + */ +export interface InvalidStateException extends __SmithyException, $MetadataBearer { + name: "InvalidStateException"; + $fault: "client"; + /** + *

A message describing the problem.
                                  + */ + Message?: string; +} + +export namespace InvalidStateException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InvalidStateException): any => ({ + ...obj, + }); +} + export interface BatchGetTriggersRequest { /** *

A list of trigger names, which may be the names returned from the ListTriggers operation.
                                  @@ -5824,6 +5871,11 @@ export interface CreateTableRequest { *

A list of partition indexes, PartitionIndex structures, to create in the table.
                                  */ PartitionIndexes?: PartitionIndex[]; + + /** + *

The ID of the transaction.
                                  + */ + TransactionId?: string; } export namespace CreateTableRequest { @@ -6861,6 +6913,11 @@ export interface DeleteTableRequest { * compatibility, this name is entirely lowercase.

                                  */ Name: string | undefined; + + /** + *

The transaction ID at which to delete the table contents.
                                  + */ + TransactionId?: string; } export namespace DeleteTableRequest { @@ -8615,83 +8672,3 @@ export namespace GetCrawlersResponse { ...obj, }); } - -export interface GetDatabaseRequest { - /** - *

The ID of the Data Catalog in which the database resides. If none is provided, the Amazon Web Services - * account ID is used by default.
                                  - */ - CatalogId?: string; - - /** - *

The name of the database to retrieve. For Hive compatibility, this - * should be all lowercase.
                                  - */ - Name: string | undefined; -} - -export namespace GetDatabaseRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GetDatabaseRequest): any => ({ - ...obj, - }); -} - -/** - *

The Database object represents a logical grouping of tables that might reside - * in a Hive metastore or an RDBMS.
                                  - */ -export interface Database { - /** - *

The name of the database. For Hive compatibility, this is folded to lowercase when it is - * stored.
                                  - */ - Name: string | undefined; - - /** - *

A description of the database.
                                  - */ - Description?: string; - - /** - *

The location of the database (for example, an HDFS path).
                                  - */ - LocationUri?: string; - - /** - *

These key-value pairs define parameters and properties - * of the database.
                                  - */ - Parameters?: { [key: string]: string }; - - /** - *

The time at which the metadata database was created in the catalog.
                                  - */ - CreateTime?: Date; - - /** - *

Creates a set of default permissions on the table for principals.
                                  - */ - CreateTableDefaultPermissions?: PrincipalPermissions[]; - - /** - *

A DatabaseIdentifier structure that describes a target database for resource linking.
                                  - */ - TargetDatabase?: DatabaseIdentifier; - - /** - *

The ID of the Data Catalog in which the database resides.
                                  - */ - CatalogId?: string; -} - -export namespace Database { - /** - * @internal - */ - export const filterSensitiveLog = (obj: Database): any => ({ - ...obj, - }); -} diff --git a/clients/client-glue/src/models/models_1.ts b/clients/client-glue/src/models/models_1.ts index 7f6aaff62fe3..e46735516b50 100644 --- a/clients/client-glue/src/models/models_1.ts +++ b/clients/client-glue/src/models/models_1.ts @@ -12,7 +12,7 @@ import { ConnectionsList, CrawlerTargets, CsvHeaderOption, - Database, + DatabaseIdentifier, DatabaseInput, DataFormat, DevEndpoint, @@ -31,6 +31,7 @@ import { PartitionInput, PartitionValueList, Predicate, + PrincipalPermissions, PrincipalType, RecrawlPolicy, RegistryId, @@ -54,6 +55,86 @@ import { WorkflowRun, } from "./models_0"; +export interface GetDatabaseRequest { + /** + *

The ID of the Data Catalog in which the database resides. If none is provided, the Amazon Web Services + * account ID is used by default.
                                  + */ + CatalogId?: string; + + /** + *

The name of the database to retrieve. For Hive compatibility, this + * should be all lowercase.
                                  + */ + Name: string | undefined; +} + +export namespace GetDatabaseRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDatabaseRequest): any => ({ + ...obj, + }); +} + +/** + *

The Database object represents a logical grouping of tables that might reside + * in a Hive metastore or an RDBMS.
                                  + */ +export interface Database { + /** + *

The name of the database. For Hive compatibility, this is folded to lowercase when it is + * stored.
                                  + */ + Name: string | undefined; + + /** + *

A description of the database.
                                  + */ + Description?: string; + + /** + *

The location of the database (for example, an HDFS path).
                                  + */ + LocationUri?: string; + + /** + *

These key-value pairs define parameters and properties + * of the database.
                                  + */ + Parameters?: { [key: string]: string }; + + /** + *

The time at which the metadata database was created in the catalog.
                                  + */ + CreateTime?: Date; + + /** + *

Creates a set of default permissions on the table for principals.
                                  + */ + CreateTableDefaultPermissions?: PrincipalPermissions[]; + + /** + *

A DatabaseIdentifier structure that describes a target database for resource linking.
                                  + */ + TargetDatabase?: DatabaseIdentifier; + + /** + *

The ID of the Data Catalog in which the database resides.
                                  + */ + CatalogId?: string; +} + +export namespace Database { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Database): any => ({ + ...obj, + }); +} + export interface GetDatabaseResponse { /** *

The definition of the specified database in the Data Catalog.
                                  @@ -2238,6 +2319,16 @@ export interface GetPartitionsRequest { *

When true, specifies not returning the partition column schema. Useful when you are interested only in other partition attributes such as partition values or location. This approach avoids the problem of a large response by not returning duplicate data.
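For illustration, a call that skips the column schema might look like this sketch (assumes GetPartitionsCommand from @aws-sdk/client-glue; the database and table names are placeholders, and top-level await is used for brevity):
  import { GlueClient, GetPartitionsCommand } from "@aws-sdk/client-glue";

  const glue = new GlueClient({ region: "us-east-1" });
  // Returns partition values and locations without repeating the column schema in every partition.
  const { Partitions } = await glue.send(
    new GetPartitionsCommand({ DatabaseName: "sales", TableName: "orders", ExcludeColumnSchema: true })
  );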
                                  */ ExcludeColumnSchema?: boolean; + + /** + *

The transaction ID at which to read the partition contents.
                                  + */ + TransactionId?: string; + + /** + *

The time as of when to read the partition contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId.
                                  + */ + QueryAsOfTime?: Date; } export namespace GetPartitionsRequest { @@ -2978,6 +3069,16 @@ export interface GetTableRequest { * compatibility, this name is entirely lowercase.

                                  */ Name: string | undefined; + + /** + *

The transaction ID at which to read the table contents.
                                  + */ + TransactionId?: string; + + /** + *

The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId.
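For illustration, a point-in-time read using this field might look like the following sketch (assumes GetTableCommand from @aws-sdk/client-glue; the database and table names are placeholders, and top-level await is used for brevity):
  import { GlueClient, GetTableCommand } from "@aws-sdk/client-glue";

  const glue = new GlueClient({ region: "us-east-1" });
  const { Table } = await glue.send(
    new GetTableCommand({
      DatabaseName: "sales", // placeholder
      Name: "orders", // placeholder
      QueryAsOfTime: new Date("2021-11-30T00:00:00Z"), // serialized on the wire as epoch seconds
    })
  );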
                                  + */ + QueryAsOfTime?: Date; } export namespace GetTableRequest { @@ -3153,6 +3254,16 @@ export interface GetTablesRequest { *

The maximum number of tables to return in a single response.
                                  */ MaxResults?: number; + + /** + *

The transaction ID at which to read the table contents.
                                  + */ + TransactionId?: string; + + /** + *

The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId.
                                  + */ + QueryAsOfTime?: Date; } export namespace GetTablesRequest { @@ -7122,6 +7233,11 @@ export interface UpdateTableRequest { * UpdateTable does not create the archived version.

                                  */ SkipArchive?: boolean; + + /** + *

The transaction ID at which to update the table contents.
                                  + */ + TransactionId?: string; } export namespace UpdateTableRequest { diff --git a/clients/client-glue/src/protocols/Aws_json1_1.ts b/clients/client-glue/src/protocols/Aws_json1_1.ts index e49f961c02a2..6ea24d4bd004 100644 --- a/clients/client-glue/src/protocols/Aws_json1_1.ts +++ b/clients/client-glue/src/protocols/Aws_json1_1.ts @@ -437,7 +437,6 @@ import { CreateWorkflowResponse, CreateXMLClassifierRequest, CsvClassifier, - Database, DatabaseIdentifier, DatabaseInput, DataLakePrincipal, @@ -526,13 +525,13 @@ import { GetCrawlerResponse, GetCrawlersRequest, GetCrawlersResponse, - GetDatabaseRequest, GlueEncryptionException, GlueTable, GrokClassifier, IdempotentParameterMismatchException, InternalServiceException, InvalidInputException, + InvalidStateException, JdbcTarget, Job, JobBookmarksEncryption, @@ -562,6 +561,7 @@ import { PrincipalPermissions, RecrawlPolicy, RegistryId, + ResourceNotReadyException, ResourceNumberLimitExceededException, ResourceUri, S3Encryption, @@ -603,6 +603,7 @@ import { ConnectionPasswordEncryption, CrawlerNotRunningException, CrawlerStoppingException, + Database, DataCatalogEncryptionSettings, DevEndpointCustomLibraries, EncryptionAtRest, @@ -610,6 +611,7 @@ import { ExportLabelsTaskRunProperties, FindMatchesMetrics, FindMatchesTaskRunProperties, + GetDatabaseRequest, GetDatabaseResponse, GetDatabasesRequest, GetDatabasesResponse, @@ -3285,6 +3287,14 @@ const deserializeAws_json1_1BatchDeleteTableCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "GlueEncryptionException": + case "com.amazonaws.glue#GlueEncryptionException": + response = { + ...(await deserializeAws_json1_1GlueEncryptionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InternalServiceException": case "com.amazonaws.glue#InternalServiceException": response = { @@ -3309,6 +3319,14 @@ const deserializeAws_json1_1BatchDeleteTableCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceNotReadyException": + case "com.amazonaws.glue#ResourceNotReadyException": + response = { + ...(await deserializeAws_json1_1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -3745,6 +3763,14 @@ const deserializeAws_json1_1BatchGetPartitionCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "InvalidStateException": + case "com.amazonaws.glue#InvalidStateException": + response = { + ...(await deserializeAws_json1_1InvalidStateExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "OperationTimeoutException": case "com.amazonaws.glue#OperationTimeoutException": response = { @@ -5575,6 +5601,14 @@ const deserializeAws_json1_1CreateTableCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceNotReadyException": + case "com.amazonaws.glue#ResourceNotReadyException": + response = { + ...(await deserializeAws_json1_1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "ResourceNumberLimitExceededException": case "com.amazonaws.glue#ResourceNumberLimitExceededException": response = { @@ -7301,6 +7335,14 @@ const 
deserializeAws_json1_1DeleteTableCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceNotReadyException": + case "com.amazonaws.glue#ResourceNotReadyException": + response = { + ...(await deserializeAws_json1_1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -10013,6 +10055,14 @@ const deserializeAws_json1_1GetPartitionsCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "InvalidStateException": + case "com.amazonaws.glue#InvalidStateException": + response = { + ...(await deserializeAws_json1_1InvalidStateExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "OperationTimeoutException": case "com.amazonaws.glue#OperationTimeoutException": response = { @@ -10021,6 +10071,14 @@ const deserializeAws_json1_1GetPartitionsCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceNotReadyException": + case "com.amazonaws.glue#ResourceNotReadyException": + response = { + ...(await deserializeAws_json1_1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -10879,6 +10937,14 @@ const deserializeAws_json1_1GetTableCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceNotReadyException": + case "com.amazonaws.glue#ResourceNotReadyException": + response = { + ...(await deserializeAws_json1_1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -16127,6 +16193,14 @@ const deserializeAws_json1_1UpdateTableCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ResourceNotReadyException": + case "com.amazonaws.glue#ResourceNotReadyException": + response = { + ...(await deserializeAws_json1_1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "ResourceNumberLimitExceededException": case "com.amazonaws.glue#ResourceNumberLimitExceededException": response = { @@ -16650,6 +16724,21 @@ const deserializeAws_json1_1InvalidInputExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1InvalidStateExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1InvalidStateException(body, context); + const contents: InvalidStateException = { + name: "InvalidStateException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1MLTransformNotReadyExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -16695,6 +16784,21 @@ const deserializeAws_json1_1OperationTimeoutExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1ResourceNotReadyExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = 
deserializeAws_json1_1ResourceNotReadyException(body, context); + const contents: ResourceNotReadyException = { + name: "ResourceNotReadyException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1ResourceNumberLimitExceededExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -16903,6 +17007,7 @@ const serializeAws_json1_1BatchDeleteTableRequest = (input: BatchDeleteTableRequ input.TablesToDelete !== null && { TablesToDelete: serializeAws_json1_1BatchDeleteTableNameList(input.TablesToDelete, context), }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), }; }; @@ -17780,6 +17885,7 @@ const serializeAws_json1_1CreateTableRequest = (input: CreateTableRequest, conte }), ...(input.TableInput !== undefined && input.TableInput !== null && { TableInput: serializeAws_json1_1TableInput(input.TableInput, context) }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), }; }; @@ -18139,6 +18245,7 @@ const serializeAws_json1_1DeleteTableRequest = (input: DeleteTableRequest, conte ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), }; }; @@ -18612,9 +18719,12 @@ const serializeAws_json1_1GetPartitionsRequest = (input: GetPartitionsRequest, c ...(input.Expression !== undefined && input.Expression !== null && { Expression: input.Expression }), ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.QueryAsOfTime !== undefined && + input.QueryAsOfTime !== null && { QueryAsOfTime: Math.round(input.QueryAsOfTime.getTime() / 1000) }), ...(input.Segment !== undefined && input.Segment !== null && { Segment: serializeAws_json1_1Segment(input.Segment, context) }), ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), }; }; @@ -18738,6 +18848,9 @@ const serializeAws_json1_1GetTableRequest = (input: GetTableRequest, context: __ ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.QueryAsOfTime !== undefined && + input.QueryAsOfTime !== null && { QueryAsOfTime: Math.round(input.QueryAsOfTime.getTime() / 1000) }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), }; }; @@ -18748,6 +18861,9 @@ const serializeAws_json1_1GetTablesRequest = (input: GetTablesRequest, context: ...(input.Expression !== undefined && input.Expression !== null && { Expression: input.Expression }), ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), ...(input.NextToken !== undefined && 
input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.QueryAsOfTime !== undefined && + input.QueryAsOfTime !== null && { QueryAsOfTime: Math.round(input.QueryAsOfTime.getTime() / 1000) }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), }; }; @@ -20404,6 +20520,7 @@ const serializeAws_json1_1UpdateTableRequest = (input: UpdateTableRequest, conte ...(input.SkipArchive !== undefined && input.SkipArchive !== null && { SkipArchive: input.SkipArchive }), ...(input.TableInput !== undefined && input.TableInput !== null && { TableInput: serializeAws_json1_1TableInput(input.TableInput, context) }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), }; }; @@ -23206,6 +23323,12 @@ const deserializeAws_json1_1InvalidInputException = (output: any, context: __Ser } as any; }; +const deserializeAws_json1_1InvalidStateException = (output: any, context: __SerdeContext): InvalidStateException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1JdbcTarget = (output: any, context: __SerdeContext): JdbcTarget => { return { ConnectionName: __expectString(output.ConnectionName), @@ -24220,6 +24343,15 @@ const deserializeAws_json1_1ResetJobBookmarkResponse = ( } as any; }; +const deserializeAws_json1_1ResourceNotReadyException = ( + output: any, + context: __SerdeContext +): ResourceNotReadyException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1ResourceNumberLimitExceededException = ( output: any, context: __SerdeContext diff --git a/clients/client-inspector2/.gitignore b/clients/client-inspector2/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-inspector2/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-inspector2/LICENSE b/clients/client-inspector2/LICENSE new file mode 100644 index 000000000000..f9e0c8672bca --- /dev/null +++ b/clients/client-inspector2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/clients/client-inspector2/README.md b/clients/client-inspector2/README.md new file mode 100644 index 000000000000..05bac2787b33 --- /dev/null +++ b/clients/client-inspector2/README.md @@ -0,0 +1,204 @@ +# @aws-sdk/client-inspector2 + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-inspector2/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-inspector2) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-inspector2.svg)](https://www.npmjs.com/package/@aws-sdk/client-inspector2) + +## Description + +AWS SDK for JavaScript Inspector2 Client for Node.js, Browser and React Native. + +

Amazon Inspector is a vulnerability discovery service that automates continuous scanning for +security vulnerabilities within your Amazon EC2 and Amazon ECR environments.
+ +## Installing + +To install this package, simply type add or install @aws-sdk/client-inspector2 +using your favorite package manager: + +- `npm install @aws-sdk/client-inspector2` +- `yarn add @aws-sdk/client-inspector2` +- `pnpm add @aws-sdk/client-inspector2` + +## Getting Started + +### Import + +The AWS SDK is modularized by clients and commands. +To send a request, you only need to import the `Inspector2Client` and +the commands you need, for example `AssociateMemberCommand`: + +```js +// ES5 example +const { Inspector2Client, AssociateMemberCommand } = require("@aws-sdk/client-inspector2"); +``` + +```ts +// ES6+ example +import { Inspector2Client, AssociateMemberCommand } from "@aws-sdk/client-inspector2"; +``` + +### Usage + +To send a request, you: + +- Initialize the client with configuration (e.g. credentials, region). +- Initialize the command with input parameters. +- Call the `send` operation on the client with the command object as input. +- If you are using a custom http handler, you may call `destroy()` to close open connections. + +```js +// a client can be shared by different commands. +const client = new Inspector2Client({ region: "REGION" }); + +const params = { + /** input parameters */ +}; +const command = new AssociateMemberCommand(params); +``` + +#### Async/await + +We recommend using the [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await) +operator to wait for the promise returned by the send operation as follows: + +```js +// async/await. +try { + const data = await client.send(command); + // process data. +} catch (error) { + // error handling. +} finally { + // finally. +} +``` + +Async/await is clean, concise, intuitive, easy to debug, and has better error handling +than Promise chains or callbacks. + +#### Promises + +You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining) +to execute the send operation. + +```js +client.send(command).then( + (data) => { + // process data. + }, + (error) => { + // error handling. + } +); +``` + +Promises can also be called using `.catch()` and `.finally()` as follows: + +```js +client + .send(command) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }) + .finally(() => { + // finally. + }); +``` + +#### Callbacks + +We do not recommend using callbacks because of [callback hell](http://callbackhell.com/), +but they are supported by the send operation. + +```js +// callbacks. +client.send(command, (err, data) => { + // process err and data. +}); +``` + +#### v2 compatible style + +The client can also send requests using the v2 compatible style. +However, it results in a bigger bundle size and may be dropped in the next major version. More details are available in the blog post +on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/) + +```ts +import * as AWS from "@aws-sdk/client-inspector2"; +const client = new AWS.Inspector2({ region: "REGION" }); + +// async/await. +try { + const data = await client.associateMember(params); + // process data. +} catch (error) { + // error handling. +} + +// Promises. +client + .associateMember(params) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }); + +// callbacks. +client.associateMember(params, (err, data) => { + // process err and data.
+}); +``` + +### Troubleshooting + +When the service returns an exception, the error will include the exception information, +as well as response metadata (e.g. request id). + +```js +try { + const data = await client.send(command); + // process data. +} catch (error) { + const { requestId, cfId, extendedRequestId } = error.$metadata; + console.log({ requestId, cfId, extendedRequestId }); + /** + * The keys within exceptions are also parsed. + * You can access them by specifying exception names: + * if (error.name === 'SomeServiceException') { + * const value = error.specialKeyInException; + * } + */ +} +``` + +## Getting Help + +Please use these community resources for getting help. +We use GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them. + +- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html) + or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html). +- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/) + on the AWS Developer Blog. +- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`. +- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3). +- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose). + +To test your universal JavaScript code in Node.js, browser and react-native environments, +visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests). + +## Contributing + +This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-inspector2` package is updated. +To contribute to this client, you can check out our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0); +see LICENSE for more information.
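As a recap of the snippets above, the following is a minimal end-to-end sketch that ties the import, client construction, `send` call, and error-metadata handling together in one script. The region value is a placeholder, and calling `ListFindingsCommand` with an empty input is an assumption (its request fields appear to be optional); verify against the generated typings in this package before relying on it.

```ts
// Minimal end-to-end sketch (assumptions: placeholder region, empty ListFindings input).
import { Inspector2Client, ListFindingsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" }); // placeholder region

async function main() {
  try {
    // ListFindings is used here on the assumption that its request parameters are optional.
    const data = await client.send(new ListFindingsCommand({}));
    console.log("findings page:", data);
  } catch (error: any) {
    // $metadata carries the request id and related diagnostics, as shown in Troubleshooting.
    const { requestId, cfId, extendedRequestId } = error.$metadata ?? {};
    console.error({ requestId, cfId, extendedRequestId });
    throw error;
  } finally {
    // Close any sockets held by the underlying HTTP handler.
    client.destroy();
  }
}

main();
```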
diff --git a/clients/client-inspector2/jest.config.js b/clients/client-inspector2/jest.config.js new file mode 100644 index 000000000000..02eed352c6a8 --- /dev/null +++ b/clients/client-inspector2/jest.config.js @@ -0,0 +1,4 @@ +module.exports = { + preset: "ts-jest", + testMatch: ["**/*.spec.ts", "!**/*.browser.spec.ts", "!**/*.integ.spec.ts"], +}; diff --git a/clients/client-inspector2/package.json b/clients/client-inspector2/package.json new file mode 100644 index 000000000000..9f534f5b8ad0 --- /dev/null +++ b/clients/client-inspector2/package.json @@ -0,0 +1,96 @@ +{ + "name": "@aws-sdk/client-inspector2", + "description": "AWS SDK for JavaScript Inspector2 Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "yarn build:cjs && yarn build:es && yarn build:types", + "build:cjs": "tsc -p tsconfig.json", + "build:docs": "yarn clean:docs && typedoc ./", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "clean": "yarn clean:dist && yarn clean:docs", + "clean:dist": "rimraf ./dist-*", + "clean:docs": "rimraf ./docs", + "downlevel-dts": "downlevel-dts dist-types dist-types/ts3.4", + "test": "jest --coverage --passWithNoTests" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "3.43.0", + "@aws-sdk/config-resolver": "3.40.0", + "@aws-sdk/credential-provider-node": "3.41.0", + "@aws-sdk/fetch-http-handler": "3.40.0", + "@aws-sdk/hash-node": "3.40.0", + "@aws-sdk/invalid-dependency": "3.40.0", + "@aws-sdk/middleware-content-length": "3.40.0", + "@aws-sdk/middleware-host-header": "3.40.0", + "@aws-sdk/middleware-logger": "3.40.0", + "@aws-sdk/middleware-retry": "3.40.0", + "@aws-sdk/middleware-serde": "3.40.0", + "@aws-sdk/middleware-signing": "3.40.0", + "@aws-sdk/middleware-stack": "3.40.0", + "@aws-sdk/middleware-user-agent": "3.40.0", + "@aws-sdk/node-config-provider": "3.40.0", + "@aws-sdk/node-http-handler": "3.40.0", + "@aws-sdk/protocol-http": "3.40.0", + "@aws-sdk/smithy-client": "3.41.0", + "@aws-sdk/types": "3.40.0", + "@aws-sdk/url-parser": "3.40.0", + "@aws-sdk/util-base64-browser": "3.37.0", + "@aws-sdk/util-base64-node": "3.37.0", + "@aws-sdk/util-body-length-browser": "3.37.0", + "@aws-sdk/util-body-length-node": "3.37.0", + "@aws-sdk/util-user-agent-browser": "3.40.0", + "@aws-sdk/util-user-agent-node": "3.40.0", + "@aws-sdk/util-utf8-browser": "3.37.0", + "@aws-sdk/util-utf8-node": "3.37.0", + "tslib": "^2.3.0", + "uuid": "^8.3.2" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "3.38.0", + "@types/node": "^12.7.5", + "@types/uuid": "^8.3.0", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-inspector2", + "repository": { + "type": "git", + 
"url": "https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-inspector2" + } +} diff --git a/clients/client-inspector2/src/Inspector2.ts b/clients/client-inspector2/src/Inspector2.ts new file mode 100644 index 000000000000..2f444a3767f5 --- /dev/null +++ b/clients/client-inspector2/src/Inspector2.ts @@ -0,0 +1,1048 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { + AssociateMemberCommand, + AssociateMemberCommandInput, + AssociateMemberCommandOutput, +} from "./commands/AssociateMemberCommand"; +import { + BatchGetAccountStatusCommand, + BatchGetAccountStatusCommandInput, + BatchGetAccountStatusCommandOutput, +} from "./commands/BatchGetAccountStatusCommand"; +import { + BatchGetFreeTrialInfoCommand, + BatchGetFreeTrialInfoCommandInput, + BatchGetFreeTrialInfoCommandOutput, +} from "./commands/BatchGetFreeTrialInfoCommand"; +import { + CancelFindingsReportCommand, + CancelFindingsReportCommandInput, + CancelFindingsReportCommandOutput, +} from "./commands/CancelFindingsReportCommand"; +import { + CreateFilterCommand, + CreateFilterCommandInput, + CreateFilterCommandOutput, +} from "./commands/CreateFilterCommand"; +import { + CreateFindingsReportCommand, + CreateFindingsReportCommandInput, + CreateFindingsReportCommandOutput, +} from "./commands/CreateFindingsReportCommand"; +import { + DeleteFilterCommand, + DeleteFilterCommandInput, + DeleteFilterCommandOutput, +} from "./commands/DeleteFilterCommand"; +import { + DescribeOrganizationConfigurationCommand, + DescribeOrganizationConfigurationCommandInput, + DescribeOrganizationConfigurationCommandOutput, +} from "./commands/DescribeOrganizationConfigurationCommand"; +import { DisableCommand, DisableCommandInput, DisableCommandOutput } from "./commands/DisableCommand"; +import { + DisableDelegatedAdminAccountCommand, + DisableDelegatedAdminAccountCommandInput, + DisableDelegatedAdminAccountCommandOutput, +} from "./commands/DisableDelegatedAdminAccountCommand"; +import { + DisassociateMemberCommand, + DisassociateMemberCommandInput, + DisassociateMemberCommandOutput, +} from "./commands/DisassociateMemberCommand"; +import { EnableCommand, EnableCommandInput, EnableCommandOutput } from "./commands/EnableCommand"; +import { + EnableDelegatedAdminAccountCommand, + EnableDelegatedAdminAccountCommandInput, + EnableDelegatedAdminAccountCommandOutput, +} from "./commands/EnableDelegatedAdminAccountCommand"; +import { + GetDelegatedAdminAccountCommand, + GetDelegatedAdminAccountCommandInput, + GetDelegatedAdminAccountCommandOutput, +} from "./commands/GetDelegatedAdminAccountCommand"; +import { + GetFindingsReportStatusCommand, + GetFindingsReportStatusCommandInput, + GetFindingsReportStatusCommandOutput, +} from "./commands/GetFindingsReportStatusCommand"; +import { GetMemberCommand, GetMemberCommandInput, GetMemberCommandOutput } from "./commands/GetMemberCommand"; +import { + ListAccountPermissionsCommand, + ListAccountPermissionsCommandInput, + ListAccountPermissionsCommandOutput, +} from "./commands/ListAccountPermissionsCommand"; +import { + ListCoverageCommand, + ListCoverageCommandInput, + ListCoverageCommandOutput, +} from "./commands/ListCoverageCommand"; +import { + ListCoverageStatisticsCommand, + ListCoverageStatisticsCommandInput, + ListCoverageStatisticsCommandOutput, +} from "./commands/ListCoverageStatisticsCommand"; +import { + ListDelegatedAdminAccountsCommand, + ListDelegatedAdminAccountsCommandInput, + ListDelegatedAdminAccountsCommandOutput, +} from 
"./commands/ListDelegatedAdminAccountsCommand"; +import { ListFiltersCommand, ListFiltersCommandInput, ListFiltersCommandOutput } from "./commands/ListFiltersCommand"; +import { + ListFindingAggregationsCommand, + ListFindingAggregationsCommandInput, + ListFindingAggregationsCommandOutput, +} from "./commands/ListFindingAggregationsCommand"; +import { + ListFindingsCommand, + ListFindingsCommandInput, + ListFindingsCommandOutput, +} from "./commands/ListFindingsCommand"; +import { ListMembersCommand, ListMembersCommandInput, ListMembersCommandOutput } from "./commands/ListMembersCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + ListUsageTotalsCommand, + ListUsageTotalsCommandInput, + ListUsageTotalsCommandOutput, +} from "./commands/ListUsageTotalsCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { + UpdateFilterCommand, + UpdateFilterCommandInput, + UpdateFilterCommandOutput, +} from "./commands/UpdateFilterCommand"; +import { + UpdateOrganizationConfigurationCommand, + UpdateOrganizationConfigurationCommandInput, + UpdateOrganizationConfigurationCommandOutput, +} from "./commands/UpdateOrganizationConfigurationCommand"; +import { Inspector2Client } from "./Inspector2Client"; + +/** + *

Amazon Inspector is a vulnerability discovery service that automates continuous scanning for + * security vulnerabilities within your Amazon EC2 and Amazon ECR environments.
                                  + */ +export class Inspector2 extends Inspector2Client { + /** + *

Associates an Amazon Web Services account with an Amazon Inspector delegated administrator.
                                  + */ + public associateMember( + args: AssociateMemberCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public associateMember( + args: AssociateMemberCommandInput, + cb: (err: any, data?: AssociateMemberCommandOutput) => void + ): void; + public associateMember( + args: AssociateMemberCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AssociateMemberCommandOutput) => void + ): void; + public associateMember( + args: AssociateMemberCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: AssociateMemberCommandOutput) => void), + cb?: (err: any, data?: AssociateMemberCommandOutput) => void + ): Promise | void { + const command = new AssociateMemberCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Retrieves the Amazon Inspector status of multiple Amazon Web Services accounts within your environment.
                                  + */ + public batchGetAccountStatus( + args: BatchGetAccountStatusCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public batchGetAccountStatus( + args: BatchGetAccountStatusCommandInput, + cb: (err: any, data?: BatchGetAccountStatusCommandOutput) => void + ): void; + public batchGetAccountStatus( + args: BatchGetAccountStatusCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: BatchGetAccountStatusCommandOutput) => void + ): void; + public batchGetAccountStatus( + args: BatchGetAccountStatusCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: BatchGetAccountStatusCommandOutput) => void), + cb?: (err: any, data?: BatchGetAccountStatusCommandOutput) => void + ): Promise | void { + const command = new BatchGetAccountStatusCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Gets free trial status for multiple Amazon Web Services accounts.
                                  + */ + public batchGetFreeTrialInfo( + args: BatchGetFreeTrialInfoCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public batchGetFreeTrialInfo( + args: BatchGetFreeTrialInfoCommandInput, + cb: (err: any, data?: BatchGetFreeTrialInfoCommandOutput) => void + ): void; + public batchGetFreeTrialInfo( + args: BatchGetFreeTrialInfoCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: BatchGetFreeTrialInfoCommandOutput) => void + ): void; + public batchGetFreeTrialInfo( + args: BatchGetFreeTrialInfoCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: BatchGetFreeTrialInfoCommandOutput) => void), + cb?: (err: any, data?: BatchGetFreeTrialInfoCommandOutput) => void + ): Promise | void { + const command = new BatchGetFreeTrialInfoCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Cancels the given findings report.
                                  + */ + public cancelFindingsReport( + args: CancelFindingsReportCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public cancelFindingsReport( + args: CancelFindingsReportCommandInput, + cb: (err: any, data?: CancelFindingsReportCommandOutput) => void + ): void; + public cancelFindingsReport( + args: CancelFindingsReportCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CancelFindingsReportCommandOutput) => void + ): void; + public cancelFindingsReport( + args: CancelFindingsReportCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CancelFindingsReportCommandOutput) => void), + cb?: (err: any, data?: CancelFindingsReportCommandOutput) => void + ): Promise | void { + const command = new CancelFindingsReportCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a filter resource using specified filter criteria.
                                  + */ + public createFilter( + args: CreateFilterCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createFilter(args: CreateFilterCommandInput, cb: (err: any, data?: CreateFilterCommandOutput) => void): void; + public createFilter( + args: CreateFilterCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateFilterCommandOutput) => void + ): void; + public createFilter( + args: CreateFilterCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateFilterCommandOutput) => void), + cb?: (err: any, data?: CreateFilterCommandOutput) => void + ): Promise | void { + const command = new CreateFilterCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a finding report.
                                  + */ + public createFindingsReport( + args: CreateFindingsReportCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createFindingsReport( + args: CreateFindingsReportCommandInput, + cb: (err: any, data?: CreateFindingsReportCommandOutput) => void + ): void; + public createFindingsReport( + args: CreateFindingsReportCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateFindingsReportCommandOutput) => void + ): void; + public createFindingsReport( + args: CreateFindingsReportCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateFindingsReportCommandOutput) => void), + cb?: (err: any, data?: CreateFindingsReportCommandOutput) => void + ): Promise | void { + const command = new CreateFindingsReportCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Deletes a filter resource.
                                  + */ + public deleteFilter( + args: DeleteFilterCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteFilter(args: DeleteFilterCommandInput, cb: (err: any, data?: DeleteFilterCommandOutput) => void): void; + public deleteFilter( + args: DeleteFilterCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteFilterCommandOutput) => void + ): void; + public deleteFilter( + args: DeleteFilterCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteFilterCommandOutput) => void), + cb?: (err: any, data?: DeleteFilterCommandOutput) => void + ): Promise | void { + const command = new DeleteFilterCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Describes Amazon Inspector configuration settings for an Amazon Web Services organization.
                                  + */ + public describeOrganizationConfiguration( + args: DescribeOrganizationConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeOrganizationConfiguration( + args: DescribeOrganizationConfigurationCommandInput, + cb: (err: any, data?: DescribeOrganizationConfigurationCommandOutput) => void + ): void; + public describeOrganizationConfiguration( + args: DescribeOrganizationConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeOrganizationConfigurationCommandOutput) => void + ): void; + public describeOrganizationConfiguration( + args: DescribeOrganizationConfigurationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeOrganizationConfigurationCommandOutput) => void), + cb?: (err: any, data?: DescribeOrganizationConfigurationCommandOutput) => void + ): Promise | void { + const command = new DescribeOrganizationConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Disables Amazon Inspector scans for one or more Amazon Web Services accounts. Disabling all scan types in an account + * disables the Amazon Inspector service.
                                  + */ + public disable(args: DisableCommandInput, options?: __HttpHandlerOptions): Promise; + public disable(args: DisableCommandInput, cb: (err: any, data?: DisableCommandOutput) => void): void; + public disable( + args: DisableCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisableCommandOutput) => void + ): void; + public disable( + args: DisableCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisableCommandOutput) => void), + cb?: (err: any, data?: DisableCommandOutput) => void + ): Promise | void { + const command = new DisableCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Disables the Amazon Inspector delegated administrator for your organization.
                                  + */ + public disableDelegatedAdminAccount( + args: DisableDelegatedAdminAccountCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disableDelegatedAdminAccount( + args: DisableDelegatedAdminAccountCommandInput, + cb: (err: any, data?: DisableDelegatedAdminAccountCommandOutput) => void + ): void; + public disableDelegatedAdminAccount( + args: DisableDelegatedAdminAccountCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisableDelegatedAdminAccountCommandOutput) => void + ): void; + public disableDelegatedAdminAccount( + args: DisableDelegatedAdminAccountCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisableDelegatedAdminAccountCommandOutput) => void), + cb?: (err: any, data?: DisableDelegatedAdminAccountCommandOutput) => void + ): Promise | void { + const command = new DisableDelegatedAdminAccountCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Disassociates a member account from an Amazon Inspector delegated administrator.
                                  + */ + public disassociateMember( + args: DisassociateMemberCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disassociateMember( + args: DisassociateMemberCommandInput, + cb: (err: any, data?: DisassociateMemberCommandOutput) => void + ): void; + public disassociateMember( + args: DisassociateMemberCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisassociateMemberCommandOutput) => void + ): void; + public disassociateMember( + args: DisassociateMemberCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisassociateMemberCommandOutput) => void), + cb?: (err: any, data?: DisassociateMemberCommandOutput) => void + ): Promise | void { + const command = new DisassociateMemberCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Enables Amazon Inspector scans for one or more Amazon Web Services accounts.
                                  + */ + public enable(args: EnableCommandInput, options?: __HttpHandlerOptions): Promise; + public enable(args: EnableCommandInput, cb: (err: any, data?: EnableCommandOutput) => void): void; + public enable( + args: EnableCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: EnableCommandOutput) => void + ): void; + public enable( + args: EnableCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: EnableCommandOutput) => void), + cb?: (err: any, data?: EnableCommandOutput) => void + ): Promise | void { + const command = new EnableCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Enables the Amazon Inspector delegated administrator for your Organizations organization.
                                  + */ + public enableDelegatedAdminAccount( + args: EnableDelegatedAdminAccountCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public enableDelegatedAdminAccount( + args: EnableDelegatedAdminAccountCommandInput, + cb: (err: any, data?: EnableDelegatedAdminAccountCommandOutput) => void + ): void; + public enableDelegatedAdminAccount( + args: EnableDelegatedAdminAccountCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: EnableDelegatedAdminAccountCommandOutput) => void + ): void; + public enableDelegatedAdminAccount( + args: EnableDelegatedAdminAccountCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: EnableDelegatedAdminAccountCommandOutput) => void), + cb?: (err: any, data?: EnableDelegatedAdminAccountCommandOutput) => void + ): Promise | void { + const command = new EnableDelegatedAdminAccountCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Retrieves information about the Amazon Inspector delegated administrator for your + * organization.
                                  + */ + public getDelegatedAdminAccount( + args: GetDelegatedAdminAccountCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getDelegatedAdminAccount( + args: GetDelegatedAdminAccountCommandInput, + cb: (err: any, data?: GetDelegatedAdminAccountCommandOutput) => void + ): void; + public getDelegatedAdminAccount( + args: GetDelegatedAdminAccountCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetDelegatedAdminAccountCommandOutput) => void + ): void; + public getDelegatedAdminAccount( + args: GetDelegatedAdminAccountCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetDelegatedAdminAccountCommandOutput) => void), + cb?: (err: any, data?: GetDelegatedAdminAccountCommandOutput) => void + ): Promise | void { + const command = new GetDelegatedAdminAccountCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Gets the status of a findings report.
                                  + */ + public getFindingsReportStatus( + args: GetFindingsReportStatusCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getFindingsReportStatus( + args: GetFindingsReportStatusCommandInput, + cb: (err: any, data?: GetFindingsReportStatusCommandOutput) => void + ): void; + public getFindingsReportStatus( + args: GetFindingsReportStatusCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetFindingsReportStatusCommandOutput) => void + ): void; + public getFindingsReportStatus( + args: GetFindingsReportStatusCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetFindingsReportStatusCommandOutput) => void), + cb?: (err: any, data?: GetFindingsReportStatusCommandOutput) => void + ): Promise | void { + const command = new GetFindingsReportStatusCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Gets member information for your organization.
                                  + */ + public getMember(args: GetMemberCommandInput, options?: __HttpHandlerOptions): Promise; + public getMember(args: GetMemberCommandInput, cb: (err: any, data?: GetMemberCommandOutput) => void): void; + public getMember( + args: GetMemberCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetMemberCommandOutput) => void + ): void; + public getMember( + args: GetMemberCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetMemberCommandOutput) => void), + cb?: (err: any, data?: GetMemberCommandOutput) => void + ): Promise | void { + const command = new GetMemberCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists the permissions an account has to configure Amazon Inspector.
                                  + */ + public listAccountPermissions( + args: ListAccountPermissionsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listAccountPermissions( + args: ListAccountPermissionsCommandInput, + cb: (err: any, data?: ListAccountPermissionsCommandOutput) => void + ): void; + public listAccountPermissions( + args: ListAccountPermissionsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListAccountPermissionsCommandOutput) => void + ): void; + public listAccountPermissions( + args: ListAccountPermissionsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListAccountPermissionsCommandOutput) => void), + cb?: (err: any, data?: ListAccountPermissionsCommandOutput) => void + ): Promise | void { + const command = new ListAccountPermissionsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists coverage details for your environment.
                                  + */ + public listCoverage( + args: ListCoverageCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listCoverage(args: ListCoverageCommandInput, cb: (err: any, data?: ListCoverageCommandOutput) => void): void; + public listCoverage( + args: ListCoverageCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListCoverageCommandOutput) => void + ): void; + public listCoverage( + args: ListCoverageCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListCoverageCommandOutput) => void), + cb?: (err: any, data?: ListCoverageCommandOutput) => void + ): Promise | void { + const command = new ListCoverageCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists Amazon Inspector coverage statistics for your environment.
                                  + */ + public listCoverageStatistics( + args: ListCoverageStatisticsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listCoverageStatistics( + args: ListCoverageStatisticsCommandInput, + cb: (err: any, data?: ListCoverageStatisticsCommandOutput) => void + ): void; + public listCoverageStatistics( + args: ListCoverageStatisticsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListCoverageStatisticsCommandOutput) => void + ): void; + public listCoverageStatistics( + args: ListCoverageStatisticsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListCoverageStatisticsCommandOutput) => void), + cb?: (err: any, data?: ListCoverageStatisticsCommandOutput) => void + ): Promise | void { + const command = new ListCoverageStatisticsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists information about the Amazon Inspector delegated administrator of your + * organization.
                                  + */ + public listDelegatedAdminAccounts( + args: ListDelegatedAdminAccountsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listDelegatedAdminAccounts( + args: ListDelegatedAdminAccountsCommandInput, + cb: (err: any, data?: ListDelegatedAdminAccountsCommandOutput) => void + ): void; + public listDelegatedAdminAccounts( + args: ListDelegatedAdminAccountsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListDelegatedAdminAccountsCommandOutput) => void + ): void; + public listDelegatedAdminAccounts( + args: ListDelegatedAdminAccountsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListDelegatedAdminAccountsCommandOutput) => void), + cb?: (err: any, data?: ListDelegatedAdminAccountsCommandOutput) => void + ): Promise | void { + const command = new ListDelegatedAdminAccountsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists the filters associated with your account.
                                  + */ + public listFilters(args: ListFiltersCommandInput, options?: __HttpHandlerOptions): Promise; + public listFilters(args: ListFiltersCommandInput, cb: (err: any, data?: ListFiltersCommandOutput) => void): void; + public listFilters( + args: ListFiltersCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListFiltersCommandOutput) => void + ): void; + public listFilters( + args: ListFiltersCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListFiltersCommandOutput) => void), + cb?: (err: any, data?: ListFiltersCommandOutput) => void + ): Promise | void { + const command = new ListFiltersCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists aggregated finding data for your environment based on specific criteria.
                                  + */ + public listFindingAggregations( + args: ListFindingAggregationsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listFindingAggregations( + args: ListFindingAggregationsCommandInput, + cb: (err: any, data?: ListFindingAggregationsCommandOutput) => void + ): void; + public listFindingAggregations( + args: ListFindingAggregationsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListFindingAggregationsCommandOutput) => void + ): void; + public listFindingAggregations( + args: ListFindingAggregationsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListFindingAggregationsCommandOutput) => void), + cb?: (err: any, data?: ListFindingAggregationsCommandOutput) => void + ): Promise | void { + const command = new ListFindingAggregationsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists findings for your environment.
                                  + */ + public listFindings( + args: ListFindingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listFindings(args: ListFindingsCommandInput, cb: (err: any, data?: ListFindingsCommandOutput) => void): void; + public listFindings( + args: ListFindingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListFindingsCommandOutput) => void + ): void; + public listFindings( + args: ListFindingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListFindingsCommandOutput) => void), + cb?: (err: any, data?: ListFindingsCommandOutput) => void + ): Promise | void { + const command = new ListFindingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists members associated with the Amazon Inspector delegated administrator for your + * organization.
                                  + */ + public listMembers(args: ListMembersCommandInput, options?: __HttpHandlerOptions): Promise; + public listMembers(args: ListMembersCommandInput, cb: (err: any, data?: ListMembersCommandOutput) => void): void; + public listMembers( + args: ListMembersCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListMembersCommandOutput) => void + ): void; + public listMembers( + args: ListMembersCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListMembersCommandOutput) => void), + cb?: (err: any, data?: ListMembersCommandOutput) => void + ): Promise | void { + const command = new ListMembersCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists all tags attached to a given resource.
                                  + */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTagsForResourceCommandOutput) => void), + cb?: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): Promise | void { + const command = new ListTagsForResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists the Amazon Inspector usage totals over the last 30 days.
                                  + */ + public listUsageTotals( + args: ListUsageTotalsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listUsageTotals( + args: ListUsageTotalsCommandInput, + cb: (err: any, data?: ListUsageTotalsCommandOutput) => void + ): void; + public listUsageTotals( + args: ListUsageTotalsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListUsageTotalsCommandOutput) => void + ): void; + public listUsageTotals( + args: ListUsageTotalsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListUsageTotalsCommandOutput) => void), + cb?: (err: any, data?: ListUsageTotalsCommandOutput) => void + ): Promise | void { + const command = new ListUsageTotalsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Adds tags to a resource.
                                  + */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise; + public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void; + public tagResource( + args: TagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TagResourceCommandOutput) => void + ): void; + public tagResource( + args: TagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TagResourceCommandOutput) => void), + cb?: (err: any, data?: TagResourceCommandOutput) => void + ): Promise | void { + const command = new TagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Removes tags from a resource.
                                  + */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public untagResource( + args: UntagResourceCommandInput, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UntagResourceCommandOutput) => void), + cb?: (err: any, data?: UntagResourceCommandOutput) => void + ): Promise | void { + const command = new UntagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Specifies the action that is to be applied to the findings that match the filter.
                                  + */ + public updateFilter( + args: UpdateFilterCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateFilter(args: UpdateFilterCommandInput, cb: (err: any, data?: UpdateFilterCommandOutput) => void): void; + public updateFilter( + args: UpdateFilterCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateFilterCommandOutput) => void + ): void; + public updateFilter( + args: UpdateFilterCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateFilterCommandOutput) => void), + cb?: (err: any, data?: UpdateFilterCommandOutput) => void + ): Promise | void { + const command = new UpdateFilterCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Updates the configurations for your Amazon Inspector organization.
                                  + */ + public updateOrganizationConfiguration( + args: UpdateOrganizationConfigurationCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateOrganizationConfiguration( + args: UpdateOrganizationConfigurationCommandInput, + cb: (err: any, data?: UpdateOrganizationConfigurationCommandOutput) => void + ): void; + public updateOrganizationConfiguration( + args: UpdateOrganizationConfigurationCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateOrganizationConfigurationCommandOutput) => void + ): void; + public updateOrganizationConfiguration( + args: UpdateOrganizationConfigurationCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateOrganizationConfigurationCommandOutput) => void), + cb?: (err: any, data?: UpdateOrganizationConfigurationCommandOutput) => void + ): Promise | void { + const command = new UpdateOrganizationConfigurationCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } +} diff --git a/clients/client-inspector2/src/Inspector2Client.ts b/clients/client-inspector2/src/Inspector2Client.ts new file mode 100644 index 000000000000..4016de76d6ff --- /dev/null +++ b/clients/client-inspector2/src/Inspector2Client.ts @@ -0,0 +1,382 @@ +import { + EndpointsInputConfig, + EndpointsResolvedConfig, + RegionInputConfig, + RegionResolvedConfig, + resolveEndpointsConfig, + resolveRegionConfig, +} from "@aws-sdk/config-resolver"; +import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length"; +import { + getHostHeaderPlugin, + HostHeaderInputConfig, + HostHeaderResolvedConfig, + resolveHostHeaderConfig, +} from "@aws-sdk/middleware-host-header"; +import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; +import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry"; +import { + AwsAuthInputConfig, + AwsAuthResolvedConfig, + getAwsAuthPlugin, + resolveAwsAuthConfig, +} from "@aws-sdk/middleware-signing"; +import { + getUserAgentPlugin, + resolveUserAgentConfig, + UserAgentInputConfig, + UserAgentResolvedConfig, +} from "@aws-sdk/middleware-user-agent"; +import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http"; +import { + Client as __Client, + SmithyConfiguration as __SmithyConfiguration, + SmithyResolvedConfiguration as __SmithyResolvedConfiguration, +} from "@aws-sdk/smithy-client"; +import { + Credentials as __Credentials, + Decoder as __Decoder, + Encoder as __Encoder, + Hash as __Hash, + HashConstructor as __HashConstructor, + HttpHandlerOptions as __HttpHandlerOptions, + Logger as __Logger, + Provider as __Provider, + Provider, + RegionInfoProvider, + StreamCollector as __StreamCollector, + UrlParser as __UrlParser, + UserAgent as __UserAgent, +} from "@aws-sdk/types"; + +import { AssociateMemberCommandInput, AssociateMemberCommandOutput } from "./commands/AssociateMemberCommand"; +import { + BatchGetAccountStatusCommandInput, + BatchGetAccountStatusCommandOutput, +} from "./commands/BatchGetAccountStatusCommand"; +import { + BatchGetFreeTrialInfoCommandInput, + BatchGetFreeTrialInfoCommandOutput, +} from "./commands/BatchGetFreeTrialInfoCommand"; +import { + CancelFindingsReportCommandInput, + 
CancelFindingsReportCommandOutput, +} from "./commands/CancelFindingsReportCommand"; +import { CreateFilterCommandInput, CreateFilterCommandOutput } from "./commands/CreateFilterCommand"; +import { + CreateFindingsReportCommandInput, + CreateFindingsReportCommandOutput, +} from "./commands/CreateFindingsReportCommand"; +import { DeleteFilterCommandInput, DeleteFilterCommandOutput } from "./commands/DeleteFilterCommand"; +import { + DescribeOrganizationConfigurationCommandInput, + DescribeOrganizationConfigurationCommandOutput, +} from "./commands/DescribeOrganizationConfigurationCommand"; +import { DisableCommandInput, DisableCommandOutput } from "./commands/DisableCommand"; +import { + DisableDelegatedAdminAccountCommandInput, + DisableDelegatedAdminAccountCommandOutput, +} from "./commands/DisableDelegatedAdminAccountCommand"; +import { DisassociateMemberCommandInput, DisassociateMemberCommandOutput } from "./commands/DisassociateMemberCommand"; +import { EnableCommandInput, EnableCommandOutput } from "./commands/EnableCommand"; +import { + EnableDelegatedAdminAccountCommandInput, + EnableDelegatedAdminAccountCommandOutput, +} from "./commands/EnableDelegatedAdminAccountCommand"; +import { + GetDelegatedAdminAccountCommandInput, + GetDelegatedAdminAccountCommandOutput, +} from "./commands/GetDelegatedAdminAccountCommand"; +import { + GetFindingsReportStatusCommandInput, + GetFindingsReportStatusCommandOutput, +} from "./commands/GetFindingsReportStatusCommand"; +import { GetMemberCommandInput, GetMemberCommandOutput } from "./commands/GetMemberCommand"; +import { + ListAccountPermissionsCommandInput, + ListAccountPermissionsCommandOutput, +} from "./commands/ListAccountPermissionsCommand"; +import { ListCoverageCommandInput, ListCoverageCommandOutput } from "./commands/ListCoverageCommand"; +import { + ListCoverageStatisticsCommandInput, + ListCoverageStatisticsCommandOutput, +} from "./commands/ListCoverageStatisticsCommand"; +import { + ListDelegatedAdminAccountsCommandInput, + ListDelegatedAdminAccountsCommandOutput, +} from "./commands/ListDelegatedAdminAccountsCommand"; +import { ListFiltersCommandInput, ListFiltersCommandOutput } from "./commands/ListFiltersCommand"; +import { + ListFindingAggregationsCommandInput, + ListFindingAggregationsCommandOutput, +} from "./commands/ListFindingAggregationsCommand"; +import { ListFindingsCommandInput, ListFindingsCommandOutput } from "./commands/ListFindingsCommand"; +import { ListMembersCommandInput, ListMembersCommandOutput } from "./commands/ListMembersCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { ListUsageTotalsCommandInput, ListUsageTotalsCommandOutput } from "./commands/ListUsageTotalsCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { UpdateFilterCommandInput, UpdateFilterCommandOutput } from "./commands/UpdateFilterCommand"; +import { + UpdateOrganizationConfigurationCommandInput, + UpdateOrganizationConfigurationCommandOutput, +} from "./commands/UpdateOrganizationConfigurationCommand"; +import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; + +export type ServiceInputTypes = + | AssociateMemberCommandInput + | BatchGetAccountStatusCommandInput + | BatchGetFreeTrialInfoCommandInput + | CancelFindingsReportCommandInput + | 
CreateFilterCommandInput + | CreateFindingsReportCommandInput + | DeleteFilterCommandInput + | DescribeOrganizationConfigurationCommandInput + | DisableCommandInput + | DisableDelegatedAdminAccountCommandInput + | DisassociateMemberCommandInput + | EnableCommandInput + | EnableDelegatedAdminAccountCommandInput + | GetDelegatedAdminAccountCommandInput + | GetFindingsReportStatusCommandInput + | GetMemberCommandInput + | ListAccountPermissionsCommandInput + | ListCoverageCommandInput + | ListCoverageStatisticsCommandInput + | ListDelegatedAdminAccountsCommandInput + | ListFiltersCommandInput + | ListFindingAggregationsCommandInput + | ListFindingsCommandInput + | ListMembersCommandInput + | ListTagsForResourceCommandInput + | ListUsageTotalsCommandInput + | TagResourceCommandInput + | UntagResourceCommandInput + | UpdateFilterCommandInput + | UpdateOrganizationConfigurationCommandInput; + +export type ServiceOutputTypes = + | AssociateMemberCommandOutput + | BatchGetAccountStatusCommandOutput + | BatchGetFreeTrialInfoCommandOutput + | CancelFindingsReportCommandOutput + | CreateFilterCommandOutput + | CreateFindingsReportCommandOutput + | DeleteFilterCommandOutput + | DescribeOrganizationConfigurationCommandOutput + | DisableCommandOutput + | DisableDelegatedAdminAccountCommandOutput + | DisassociateMemberCommandOutput + | EnableCommandOutput + | EnableDelegatedAdminAccountCommandOutput + | GetDelegatedAdminAccountCommandOutput + | GetFindingsReportStatusCommandOutput + | GetMemberCommandOutput + | ListAccountPermissionsCommandOutput + | ListCoverageCommandOutput + | ListCoverageStatisticsCommandOutput + | ListDelegatedAdminAccountsCommandOutput + | ListFiltersCommandOutput + | ListFindingAggregationsCommandOutput + | ListFindingsCommandOutput + | ListMembersCommandOutput + | ListTagsForResourceCommandOutput + | ListUsageTotalsCommandOutput + | TagResourceCommandOutput + | UntagResourceCommandOutput + | UpdateFilterCommandOutput + | UpdateOrganizationConfigurationCommandOutput; + +export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { + /** + * The HTTP handler to use. Fetch in browser and Https in Nodejs. + */ + requestHandler?: __HttpHandler; + + /** + * A constructor for a class implementing the {@link __Hash} interface + * that computes the SHA-256 HMAC or checksum of a string or binary buffer. + * @internal + */ + sha256?: __HashConstructor; + + /** + * The function that will be used to convert strings into HTTP endpoints. + * @internal + */ + urlParser?: __UrlParser; + + /** + * A function that can calculate the length of a request body. + * @internal + */ + bodyLengthChecker?: (body: any) => number | undefined; + + /** + * A function that converts a stream into an array of bytes. + * @internal + */ + streamCollector?: __StreamCollector; + + /** + * The function that will be used to convert a base64-encoded string to a byte array. + * @internal + */ + base64Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a base64-encoded string. + * @internal + */ + base64Encoder?: __Encoder; + + /** + * The function that will be used to convert a UTF8-encoded string to a byte array. + * @internal + */ + utf8Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a UTF-8 encoded string. + * @internal + */ + utf8Encoder?: __Encoder; + + /** + * The runtime environment. 
+ * @internal + */ + runtime?: string; + + /** + * Disable dyanamically changing the endpoint of the client based on the hostPrefix + * trait of an operation. + */ + disableHostPrefix?: boolean; + + /** + * Value for how many times a request will be made at most in case of retry. + */ + maxAttempts?: number | __Provider; + + /** + * Specifies which retry algorithm to use. + */ + retryMode?: string | __Provider; + + /** + * Optional logger for logging debug/info/warn/error. + */ + logger?: __Logger; + + /** + * Enables IPv6/IPv4 dualstack endpoint. + */ + useDualstackEndpoint?: boolean | __Provider; + + /** + * Enables FIPS compatible endpoints. + */ + useFipsEndpoint?: boolean | __Provider; + + /** + * Unique service identifier. + * @internal + */ + serviceId?: string; + + /** + * The AWS region to which this client will send requests + */ + region?: string | __Provider; + + /** + * Default credentials provider; Not available in browser runtime. + * @internal + */ + credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; + + /** + * Fetch related hostname, signing name or signing region with given region. + * @internal + */ + regionInfoProvider?: RegionInfoProvider; + + /** + * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header + * @internal + */ + defaultUserAgentProvider?: Provider<__UserAgent>; +} + +type Inspector2ClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & + ClientDefaults & + RegionInputConfig & + EndpointsInputConfig & + RetryInputConfig & + HostHeaderInputConfig & + AwsAuthInputConfig & + UserAgentInputConfig; +/** + * The configuration interface of Inspector2Client class constructor that set the region, credentials and other options. + */ +export interface Inspector2ClientConfig extends Inspector2ClientConfigType {} + +type Inspector2ClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & + Required & + RegionResolvedConfig & + EndpointsResolvedConfig & + RetryResolvedConfig & + HostHeaderResolvedConfig & + AwsAuthResolvedConfig & + UserAgentResolvedConfig; +/** + * The resolved configuration interface of Inspector2Client class. This is resolved and normalized from the {@link Inspector2ClientConfig | constructor configuration interface}. + */ +export interface Inspector2ClientResolvedConfig extends Inspector2ClientResolvedConfigType {} + +/** + *

<p>Amazon Inspector is a vulnerability discovery service that automates continuous scanning for + * security vulnerabilities within your Amazon EC2 and Amazon ECR environments.</p>
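A minimal construction sketch (not part of the generated output) showing how the configuration fields documented in `ClientDefaults` above (region, maxAttempts, retryMode, logger) flow into the client, and where `destroy()` fits; the `ListFiltersCommand` call and its `filters` response field are illustrative assumptions.

```ts
import { Inspector2Client, ListFiltersCommand } from "@aws-sdk/client-inspector2";

// region, maxAttempts, retryMode and logger map onto the ClientDefaults fields above.
const client = new Inspector2Client({
  region: "us-east-1",
  maxAttempts: 3,
  retryMode: "standard",
  logger: console,
});

export async function listFilters() {
  try {
    // ListFilters is assumed to accept an empty request and return a `filters` page.
    const response = await client.send(new ListFiltersCommand({}));
    return response.filters;
  } finally {
    // In Node.js, release the underlying sockets once the client is no longer needed.
    client.destroy();
  }
}
```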

                                  + */ +export class Inspector2Client extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + Inspector2ClientResolvedConfig +> { + /** + * The resolved configuration of Inspector2Client class. This is resolved and normalized from the {@link Inspector2ClientConfig | constructor configuration interface}. + */ + readonly config: Inspector2ClientResolvedConfig; + + constructor(configuration: Inspector2ClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. + */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-inspector2/src/commands/AssociateMemberCommand.ts b/clients/client-inspector2/src/commands/AssociateMemberCommand.ts new file mode 100644 index 000000000000..b539f9378105 --- /dev/null +++ b/clients/client-inspector2/src/commands/AssociateMemberCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { AssociateMemberRequest, AssociateMemberResponse } from "../models/models_0"; +import { + deserializeAws_restJson1AssociateMemberCommand, + serializeAws_restJson1AssociateMemberCommand, +} from "../protocols/Aws_restJson1"; + +export interface AssociateMemberCommandInput extends AssociateMemberRequest {} +export interface AssociateMemberCommandOutput extends AssociateMemberResponse, __MetadataBearer {} + +/** + *

<p>Associates an Amazon Web Services account with an Amazon Inspector delegated administrator.</p>
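A hedged usage sketch with basic error handling; the `accountId` request field and the echoed `accountId` in the response are assumptions based on the model names in `models_0.ts`.

```ts
import { Inspector2Client, AssociateMemberCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });

export async function associateMember(accountId: string): Promise<string | undefined> {
  try {
    // accountId is assumed to be the member account to associate with the delegated administrator.
    const { accountId: associated } = await client.send(new AssociateMemberCommand({ accountId }));
    return associated;
  } catch (err: any) {
    // AWS SDK v3 service exceptions expose HTTP metadata under $metadata.
    console.error(`${err.name}: HTTP ${err.$metadata?.httpStatusCode}`);
    throw err;
  }
}
```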

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, AssociateMemberCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, AssociateMemberCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new AssociateMemberCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AssociateMemberCommandInput} for command's `input` shape. + * @see {@link AssociateMemberCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class AssociateMemberCommand extends $Command< + AssociateMemberCommandInput, + AssociateMemberCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AssociateMemberCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "AssociateMemberCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AssociateMemberRequest.filterSensitiveLog, + outputFilterSensitiveLog: AssociateMemberResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: AssociateMemberCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1AssociateMemberCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1AssociateMemberCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/BatchGetAccountStatusCommand.ts b/clients/client-inspector2/src/commands/BatchGetAccountStatusCommand.ts new file mode 100644 index 000000000000..18d2d835174b --- /dev/null +++ b/clients/client-inspector2/src/commands/BatchGetAccountStatusCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { BatchGetAccountStatusRequest, BatchGetAccountStatusResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1BatchGetAccountStatusCommand, + serializeAws_restJson1BatchGetAccountStatusCommand, +} from "../protocols/Aws_restJson1"; + +export interface BatchGetAccountStatusCommandInput extends BatchGetAccountStatusRequest {} +export interface BatchGetAccountStatusCommandOutput extends BatchGetAccountStatusResponse, __MetadataBearer {} + +/** + *

<p>Retrieves the Amazon Inspector status of multiple Amazon Web Services accounts within your environment.</p>
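A short sketch of checking several member accounts at once; the `accountIds`, `accounts`, and `failedAccounts` field names are assumptions taken from the request/response model names.

```ts
import { Inspector2Client, BatchGetAccountStatusCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });

export async function getAccountStatuses(accountIds: string[]) {
  // accountIds is assumed to be optional; omitting it should return the caller's own status.
  const { accounts, failedAccounts } = await client.send(
    new BatchGetAccountStatusCommand({ accountIds })
  );
  if (failedAccounts?.length) {
    console.warn(`Could not retrieve status for ${failedAccounts.length} account(s)`);
  }
  return accounts;
}
```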

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, BatchGetAccountStatusCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, BatchGetAccountStatusCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new BatchGetAccountStatusCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link BatchGetAccountStatusCommandInput} for command's `input` shape. + * @see {@link BatchGetAccountStatusCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class BatchGetAccountStatusCommand extends $Command< + BatchGetAccountStatusCommandInput, + BatchGetAccountStatusCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: BatchGetAccountStatusCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "BatchGetAccountStatusCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: BatchGetAccountStatusRequest.filterSensitiveLog, + outputFilterSensitiveLog: BatchGetAccountStatusResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: BatchGetAccountStatusCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1BatchGetAccountStatusCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1BatchGetAccountStatusCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/BatchGetFreeTrialInfoCommand.ts b/clients/client-inspector2/src/commands/BatchGetFreeTrialInfoCommand.ts new file mode 100644 index 000000000000..89fc8cf3b71a --- /dev/null +++ b/clients/client-inspector2/src/commands/BatchGetFreeTrialInfoCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { 
BatchGetFreeTrialInfoRequest, BatchGetFreeTrialInfoResponse } from "../models/models_0"; +import { + deserializeAws_restJson1BatchGetFreeTrialInfoCommand, + serializeAws_restJson1BatchGetFreeTrialInfoCommand, +} from "../protocols/Aws_restJson1"; + +export interface BatchGetFreeTrialInfoCommandInput extends BatchGetFreeTrialInfoRequest {} +export interface BatchGetFreeTrialInfoCommandOutput extends BatchGetFreeTrialInfoResponse, __MetadataBearer {} + +/** + *

<p>Gets free trial status for multiple Amazon Web Services accounts.</p>
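The aggregated `Inspector2` class generated in this patch accepts either a callback or returns a promise, dispatched by the `typeof` checks in the overloads shown earlier. A sketch of both styles, assuming a `batchGetFreeTrialInfo` method and an `accountIds` request field.

```ts
import { Inspector2 } from "@aws-sdk/client-inspector2";

const inspector2 = new Inspector2({ region: "us-east-1" });
const params = { accountIds: ["111111111111"] }; // assumed request shape

// Promise style.
export async function freeTrialInfo() {
  return inspector2.batchGetFreeTrialInfo(params);
}

// Callback style, handled by the generated overload dispatch.
inspector2.batchGetFreeTrialInfo(params, (err, data) => {
  if (err) console.error(err);
  else console.log(data?.accounts);
});
```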

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, BatchGetFreeTrialInfoCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, BatchGetFreeTrialInfoCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new BatchGetFreeTrialInfoCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link BatchGetFreeTrialInfoCommandInput} for command's `input` shape. + * @see {@link BatchGetFreeTrialInfoCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class BatchGetFreeTrialInfoCommand extends $Command< + BatchGetFreeTrialInfoCommandInput, + BatchGetFreeTrialInfoCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: BatchGetFreeTrialInfoCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "BatchGetFreeTrialInfoCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: BatchGetFreeTrialInfoRequest.filterSensitiveLog, + outputFilterSensitiveLog: BatchGetFreeTrialInfoResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: BatchGetFreeTrialInfoCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1BatchGetFreeTrialInfoCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1BatchGetFreeTrialInfoCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/CancelFindingsReportCommand.ts b/clients/client-inspector2/src/commands/CancelFindingsReportCommand.ts new file mode 100644 index 000000000000..6bb58a03d13a --- /dev/null +++ b/clients/client-inspector2/src/commands/CancelFindingsReportCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { CancelFindingsReportRequest, 
CancelFindingsReportResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CancelFindingsReportCommand, + serializeAws_restJson1CancelFindingsReportCommand, +} from "../protocols/Aws_restJson1"; + +export interface CancelFindingsReportCommandInput extends CancelFindingsReportRequest {} +export interface CancelFindingsReportCommandOutput extends CancelFindingsReportResponse, __MetadataBearer {} + +/** + *

<p>Cancels the given findings report.</p>

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, CancelFindingsReportCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, CancelFindingsReportCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new CancelFindingsReportCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CancelFindingsReportCommandInput} for command's `input` shape. + * @see {@link CancelFindingsReportCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class CancelFindingsReportCommand extends $Command< + CancelFindingsReportCommandInput, + CancelFindingsReportCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CancelFindingsReportCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "CancelFindingsReportCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CancelFindingsReportRequest.filterSensitiveLog, + outputFilterSensitiveLog: CancelFindingsReportResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CancelFindingsReportCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CancelFindingsReportCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CancelFindingsReportCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/CreateFilterCommand.ts b/clients/client-inspector2/src/commands/CreateFilterCommand.ts new file mode 100644 index 000000000000..689689dd9205 --- /dev/null +++ b/clients/client-inspector2/src/commands/CreateFilterCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { CreateFilterRequest, CreateFilterResponse } from "../models/models_0"; 
+import { + deserializeAws_restJson1CreateFilterCommand, + serializeAws_restJson1CreateFilterCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateFilterCommandInput extends CreateFilterRequest {} +export interface CreateFilterCommandOutput extends CreateFilterResponse, __MetadataBearer {} + +/** + *

<p>Creates a filter resource using specified filter criteria.</p>
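A hedged sketch of the filter lifecycle, including deletion with `DeleteFilterCommand` (described further down in this patch). The field names (`name`, `action`, `filterCriteria`, `severity`, `comparison`, `arn`) are assumptions drawn from the Inspector2 filter model; `models_0.ts` is authoritative.

```ts
import { Inspector2Client, CreateFilterCommand, DeleteFilterCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });

export async function suppressLowSeverityFindings() {
  // Create a suppression rule for LOW severity findings (assumed criteria shape).
  const { arn } = await client.send(
    new CreateFilterCommand({
      name: "suppress-low-severity",
      action: "SUPPRESS",
      filterCriteria: {
        severity: [{ comparison: "EQUALS", value: "LOW" }],
      },
    })
  );
  return arn;
}

export async function removeFilter(arn: string) {
  // DeleteFilter is assumed to take the filter ARN.
  await client.send(new DeleteFilterCommand({ arn }));
}
```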

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, CreateFilterCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, CreateFilterCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new CreateFilterCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateFilterCommandInput} for command's `input` shape. + * @see {@link CreateFilterCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class CreateFilterCommand extends $Command< + CreateFilterCommandInput, + CreateFilterCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateFilterCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "CreateFilterCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateFilterRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateFilterResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateFilterCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateFilterCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateFilterCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/CreateFindingsReportCommand.ts b/clients/client-inspector2/src/commands/CreateFindingsReportCommand.ts new file mode 100644 index 000000000000..2214ed0e7329 --- /dev/null +++ b/clients/client-inspector2/src/commands/CreateFindingsReportCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { CreateFindingsReportRequest, CreateFindingsReportResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateFindingsReportCommand, + 
serializeAws_restJson1CreateFindingsReportCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateFindingsReportCommandInput extends CreateFindingsReportRequest {} +export interface CreateFindingsReportCommandOutput extends CreateFindingsReportResponse, __MetadataBearer {} + +/** + *

<p>Creates a finding report.</p>
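A sketch of generating a findings report and then cancelling it with `CancelFindingsReportCommand` (described above); `reportFormat`, `s3Destination` (with `bucketName` and `kmsKeyArn`), and `reportId` are assumed field names, and the bucket and key ARN are placeholders.

```ts
import {
  Inspector2Client,
  CreateFindingsReportCommand,
  CancelFindingsReportCommand,
} from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });

export async function exportAndMaybeCancel() {
  const { reportId } = await client.send(
    new CreateFindingsReportCommand({
      reportFormat: "CSV",
      s3Destination: {
        bucketName: "my-inspector-reports",
        keyPrefix: "exports/",
        kmsKeyArn: "arn:aws:kms:us-east-1:111111111111:key/EXAMPLE",
      },
    })
  );

  // Cancel report generation if it is no longer needed.
  if (reportId) {
    await client.send(new CancelFindingsReportCommand({ reportId }));
  }
}
```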

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, CreateFindingsReportCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, CreateFindingsReportCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new CreateFindingsReportCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateFindingsReportCommandInput} for command's `input` shape. + * @see {@link CreateFindingsReportCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class CreateFindingsReportCommand extends $Command< + CreateFindingsReportCommandInput, + CreateFindingsReportCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateFindingsReportCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "CreateFindingsReportCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateFindingsReportRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateFindingsReportResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateFindingsReportCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateFindingsReportCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateFindingsReportCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/DeleteFilterCommand.ts b/clients/client-inspector2/src/commands/DeleteFilterCommand.ts new file mode 100644 index 000000000000..abead9c0c574 --- /dev/null +++ b/clients/client-inspector2/src/commands/DeleteFilterCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { DeleteFilterRequest, DeleteFilterResponse } from "../models/models_0"; 
+import { + deserializeAws_restJson1DeleteFilterCommand, + serializeAws_restJson1DeleteFilterCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteFilterCommandInput extends DeleteFilterRequest {} +export interface DeleteFilterCommandOutput extends DeleteFilterResponse, __MetadataBearer {} + +/** + *

<p>Deletes a filter resource.</p>

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, DeleteFilterCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, DeleteFilterCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new DeleteFilterCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteFilterCommandInput} for command's `input` shape. + * @see {@link DeleteFilterCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class DeleteFilterCommand extends $Command< + DeleteFilterCommandInput, + DeleteFilterCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteFilterCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "DeleteFilterCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteFilterRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteFilterResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteFilterCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteFilterCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteFilterCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/DescribeOrganizationConfigurationCommand.ts b/clients/client-inspector2/src/commands/DescribeOrganizationConfigurationCommand.ts new file mode 100644 index 000000000000..c34806e2e678 --- /dev/null +++ b/clients/client-inspector2/src/commands/DescribeOrganizationConfigurationCommand.ts @@ -0,0 +1,106 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { + DescribeOrganizationConfigurationRequest, + DescribeOrganizationConfigurationResponse, +} from "../models/models_0"; +import 
{ + deserializeAws_restJson1DescribeOrganizationConfigurationCommand, + serializeAws_restJson1DescribeOrganizationConfigurationCommand, +} from "../protocols/Aws_restJson1"; + +export interface DescribeOrganizationConfigurationCommandInput extends DescribeOrganizationConfigurationRequest {} +export interface DescribeOrganizationConfigurationCommandOutput + extends DescribeOrganizationConfigurationResponse, + __MetadataBearer {} + +/** + *

<p>Describes Amazon Inspector configuration settings for an Amazon Web Services organization.</p>
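A sketch of reading the organization-wide settings and updating them with `UpdateOrganizationConfigurationCommand` (whose overloads appear earlier in this patch). The `autoEnable` structure with `ec2`/`ecr` booleans and the `maxAccountLimitReached` flag are assumptions from the organization-configuration model.

```ts
import {
  Inspector2Client,
  DescribeOrganizationConfigurationCommand,
  UpdateOrganizationConfigurationCommand,
} from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });

export async function ensureEcrAutoEnable() {
  const { autoEnable, maxAccountLimitReached } = await client.send(
    new DescribeOrganizationConfigurationCommand({})
  );
  if (!maxAccountLimitReached && autoEnable && !autoEnable.ecr) {
    // Turn on ECR auto-enable while preserving the EC2 setting (assumed request shape).
    await client.send(
      new UpdateOrganizationConfigurationCommand({
        autoEnable: { ec2: autoEnable.ec2 ?? false, ecr: true },
      })
    );
  }
}
```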

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, DescribeOrganizationConfigurationCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, DescribeOrganizationConfigurationCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new DescribeOrganizationConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeOrganizationConfigurationCommandInput} for command's `input` shape. + * @see {@link DescribeOrganizationConfigurationCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class DescribeOrganizationConfigurationCommand extends $Command< + DescribeOrganizationConfigurationCommandInput, + DescribeOrganizationConfigurationCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeOrganizationConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "DescribeOrganizationConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeOrganizationConfigurationRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeOrganizationConfigurationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: DescribeOrganizationConfigurationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1DescribeOrganizationConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1DescribeOrganizationConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/DisableCommand.ts b/clients/client-inspector2/src/commands/DisableCommand.ts new file mode 100644 index 000000000000..c8802879c4e1 --- /dev/null +++ b/clients/client-inspector2/src/commands/DisableCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from 
"@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { DisableRequest, DisableResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DisableCommand, + serializeAws_restJson1DisableCommand, +} from "../protocols/Aws_restJson1"; + +export interface DisableCommandInput extends DisableRequest {} +export interface DisableCommandOutput extends DisableResponse, __MetadataBearer {} + +/** + *

<p>Disables Amazon Inspector scans for one or more Amazon Web Services accounts. Disabling all scan types in an account + * disables the Amazon Inspector service.</p>

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, DisableCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, DisableCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new DisableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisableCommandInput} for command's `input` shape. + * @see {@link DisableCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class DisableCommand extends $Command< + DisableCommandInput, + DisableCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "DisableCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisableRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisableResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisableCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DisableCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DisableCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/DisableDelegatedAdminAccountCommand.ts b/clients/client-inspector2/src/commands/DisableDelegatedAdminAccountCommand.ts new file mode 100644 index 000000000000..6982c718d23a --- /dev/null +++ b/clients/client-inspector2/src/commands/DisableDelegatedAdminAccountCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { DisableDelegatedAdminAccountRequest, DisableDelegatedAdminAccountResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DisableDelegatedAdminAccountCommand, + 
serializeAws_restJson1DisableDelegatedAdminAccountCommand, +} from "../protocols/Aws_restJson1"; + +export interface DisableDelegatedAdminAccountCommandInput extends DisableDelegatedAdminAccountRequest {} +export interface DisableDelegatedAdminAccountCommandOutput + extends DisableDelegatedAdminAccountResponse, + __MetadataBearer {} + +/** + *

<p>Disables the Amazon Inspector delegated administrator for your organization.</p>

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, DisableDelegatedAdminAccountCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, DisableDelegatedAdminAccountCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new DisableDelegatedAdminAccountCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisableDelegatedAdminAccountCommandInput} for command's `input` shape. + * @see {@link DisableDelegatedAdminAccountCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class DisableDelegatedAdminAccountCommand extends $Command< + DisableDelegatedAdminAccountCommandInput, + DisableDelegatedAdminAccountCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisableDelegatedAdminAccountCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "DisableDelegatedAdminAccountCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisableDelegatedAdminAccountRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisableDelegatedAdminAccountResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisableDelegatedAdminAccountCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DisableDelegatedAdminAccountCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1DisableDelegatedAdminAccountCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/DisassociateMemberCommand.ts b/clients/client-inspector2/src/commands/DisassociateMemberCommand.ts new file mode 100644 index 000000000000..c61f43b3969a --- /dev/null +++ b/clients/client-inspector2/src/commands/DisassociateMemberCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, 
ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { DisassociateMemberRequest, DisassociateMemberResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DisassociateMemberCommand, + serializeAws_restJson1DisassociateMemberCommand, +} from "../protocols/Aws_restJson1"; + +export interface DisassociateMemberCommandInput extends DisassociateMemberRequest {} +export interface DisassociateMemberCommandOutput extends DisassociateMemberResponse, __MetadataBearer {} + +/** + *

<p>Disassociates a member account from an Amazon Inspector delegated administrator.</p>
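A judgment-call sketch pairing this operation with `DisableDelegatedAdminAccountCommand` (shown earlier in this patch): disassociate a member, then retire the delegated administrator. The `accountId` and `delegatedAdminAccountId` request fields are assumptions, and the ordering is illustrative rather than a documented requirement.

```ts
import {
  Inspector2Client,
  DisassociateMemberCommand,
  DisableDelegatedAdminAccountCommand,
} from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });

export async function retireDelegatedAdmin(memberAccountId: string, delegatedAdminAccountId: string) {
  // Remove the member from the delegated administrator first (assumed accountId field).
  await client.send(new DisassociateMemberCommand({ accountId: memberAccountId }));

  // Then disable the delegated administrator for the organization.
  await client.send(new DisableDelegatedAdminAccountCommand({ delegatedAdminAccountId }));
}
```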

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, DisassociateMemberCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, DisassociateMemberCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new DisassociateMemberCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisassociateMemberCommandInput} for command's `input` shape. + * @see {@link DisassociateMemberCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class DisassociateMemberCommand extends $Command< + DisassociateMemberCommandInput, + DisassociateMemberCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisassociateMemberCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "DisassociateMemberCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisassociateMemberRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisassociateMemberResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisassociateMemberCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DisassociateMemberCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DisassociateMemberCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/EnableCommand.ts b/clients/client-inspector2/src/commands/EnableCommand.ts new file mode 100644 index 000000000000..19a5ea0ef6bf --- /dev/null +++ b/clients/client-inspector2/src/commands/EnableCommand.ts @@ -0,0 +1,88 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { EnableRequest, EnableResponse } from "../models/models_0"; +import { deserializeAws_restJson1EnableCommand, 
serializeAws_restJson1EnableCommand } from "../protocols/Aws_restJson1"; + +export interface EnableCommandInput extends EnableRequest {} +export interface EnableCommandOutput extends EnableResponse, __MetadataBearer {} + +/** + *

<p>Enables Amazon Inspector scans for one or more Amazon Web Services accounts.</p>
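A hedged sketch of turning on scanning. The `resourceTypes` field (with values such as "EC2" and "ECR") and `accountIds` are assumed request fields, and `DisableCommand` (described above) is assumed to mirror the same shape.

```ts
import { Inspector2Client, EnableCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });

export async function enableScanning(accountIds: string[]) {
  // Enable both EC2 and ECR scanning for the given member accounts.
  const { accounts, failedAccounts } = await client.send(
    new EnableCommand({
      accountIds,
      resourceTypes: ["EC2", "ECR"],
    })
  );
  return { accounts, failedAccounts };
}
```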

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, EnableCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, EnableCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new EnableCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link EnableCommandInput} for command's `input` shape. + * @see {@link EnableCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class EnableCommand extends $Command { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: EnableCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "EnableCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: EnableRequest.filterSensitiveLog, + outputFilterSensitiveLog: EnableResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: EnableCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1EnableCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1EnableCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/EnableDelegatedAdminAccountCommand.ts b/clients/client-inspector2/src/commands/EnableDelegatedAdminAccountCommand.ts new file mode 100644 index 000000000000..5e8492aa02d9 --- /dev/null +++ b/clients/client-inspector2/src/commands/EnableDelegatedAdminAccountCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { EnableDelegatedAdminAccountRequest, EnableDelegatedAdminAccountResponse } from "../models/models_0"; +import { + deserializeAws_restJson1EnableDelegatedAdminAccountCommand, + serializeAws_restJson1EnableDelegatedAdminAccountCommand, +} from "../protocols/Aws_restJson1"; + +export interface 
EnableDelegatedAdminAccountCommandInput extends EnableDelegatedAdminAccountRequest {} +export interface EnableDelegatedAdminAccountCommandOutput + extends EnableDelegatedAdminAccountResponse, + __MetadataBearer {} + +/** + *

+ * <p>Enables the Amazon Inspector delegated administrator for your Organizations organization.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, EnableDelegatedAdminAccountCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, EnableDelegatedAdminAccountCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new EnableDelegatedAdminAccountCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link EnableDelegatedAdminAccountCommandInput} for command's `input` shape. + * @see {@link EnableDelegatedAdminAccountCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class EnableDelegatedAdminAccountCommand extends $Command< + EnableDelegatedAdminAccountCommandInput, + EnableDelegatedAdminAccountCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: EnableDelegatedAdminAccountCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "EnableDelegatedAdminAccountCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: EnableDelegatedAdminAccountRequest.filterSensitiveLog, + outputFilterSensitiveLog: EnableDelegatedAdminAccountResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: EnableDelegatedAdminAccountCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1EnableDelegatedAdminAccountCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1EnableDelegatedAdminAccountCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/GetDelegatedAdminAccountCommand.ts b/clients/client-inspector2/src/commands/GetDelegatedAdminAccountCommand.ts new file mode 100644 index 000000000000..dcd4bddc3140 --- /dev/null +++ b/clients/client-inspector2/src/commands/GetDelegatedAdminAccountCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, 
ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { GetDelegatedAdminAccountRequest, GetDelegatedAdminAccountResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetDelegatedAdminAccountCommand, + serializeAws_restJson1GetDelegatedAdminAccountCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetDelegatedAdminAccountCommandInput extends GetDelegatedAdminAccountRequest {} +export interface GetDelegatedAdminAccountCommandOutput extends GetDelegatedAdminAccountResponse, __MetadataBearer {} + +/** + *

+ * <p>Retrieves information about the Amazon Inspector delegated administrator for your organization.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, GetDelegatedAdminAccountCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, GetDelegatedAdminAccountCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new GetDelegatedAdminAccountCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetDelegatedAdminAccountCommandInput} for command's `input` shape. + * @see {@link GetDelegatedAdminAccountCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class GetDelegatedAdminAccountCommand extends $Command< + GetDelegatedAdminAccountCommandInput, + GetDelegatedAdminAccountCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetDelegatedAdminAccountCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "GetDelegatedAdminAccountCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetDelegatedAdminAccountRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetDelegatedAdminAccountResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetDelegatedAdminAccountCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetDelegatedAdminAccountCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetDelegatedAdminAccountCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/GetFindingsReportStatusCommand.ts b/clients/client-inspector2/src/commands/GetFindingsReportStatusCommand.ts new file mode 100644 index 000000000000..1241ad57c397 --- /dev/null +++ b/clients/client-inspector2/src/commands/GetFindingsReportStatusCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from 
"../Inspector2Client"; +import { GetFindingsReportStatusRequest, GetFindingsReportStatusResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetFindingsReportStatusCommand, + serializeAws_restJson1GetFindingsReportStatusCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetFindingsReportStatusCommandInput extends GetFindingsReportStatusRequest {} +export interface GetFindingsReportStatusCommandOutput extends GetFindingsReportStatusResponse, __MetadataBearer {} + +/** + *

+ * <p>Gets the status of a findings report.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, GetFindingsReportStatusCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, GetFindingsReportStatusCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new GetFindingsReportStatusCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetFindingsReportStatusCommandInput} for command's `input` shape. + * @see {@link GetFindingsReportStatusCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class GetFindingsReportStatusCommand extends $Command< + GetFindingsReportStatusCommandInput, + GetFindingsReportStatusCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetFindingsReportStatusCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "GetFindingsReportStatusCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetFindingsReportStatusRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetFindingsReportStatusResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetFindingsReportStatusCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetFindingsReportStatusCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetFindingsReportStatusCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/GetMemberCommand.ts b/clients/client-inspector2/src/commands/GetMemberCommand.ts new file mode 100644 index 000000000000..7b54a9729286 --- /dev/null +++ b/clients/client-inspector2/src/commands/GetMemberCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { GetMemberRequest, 
GetMemberResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetMemberCommand, + serializeAws_restJson1GetMemberCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetMemberCommandInput extends GetMemberRequest {} +export interface GetMemberCommandOutput extends GetMemberResponse, __MetadataBearer {} + +/** + *

+ * <p>Gets member information for your organization.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, GetMemberCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, GetMemberCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new GetMemberCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetMemberCommandInput} for command's `input` shape. + * @see {@link GetMemberCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class GetMemberCommand extends $Command< + GetMemberCommandInput, + GetMemberCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetMemberCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "GetMemberCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetMemberRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetMemberResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetMemberCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetMemberCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetMemberCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListAccountPermissionsCommand.ts b/clients/client-inspector2/src/commands/ListAccountPermissionsCommand.ts new file mode 100644 index 000000000000..f3a0638bd4f5 --- /dev/null +++ b/clients/client-inspector2/src/commands/ListAccountPermissionsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { ListAccountPermissionsRequest, ListAccountPermissionsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListAccountPermissionsCommand, + 
serializeAws_restJson1ListAccountPermissionsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListAccountPermissionsCommandInput extends ListAccountPermissionsRequest {} +export interface ListAccountPermissionsCommandOutput extends ListAccountPermissionsResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists the permissions an account has to configure Amazon Inspector.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListAccountPermissionsCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListAccountPermissionsCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListAccountPermissionsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListAccountPermissionsCommandInput} for command's `input` shape. + * @see {@link ListAccountPermissionsCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListAccountPermissionsCommand extends $Command< + ListAccountPermissionsCommandInput, + ListAccountPermissionsCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListAccountPermissionsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListAccountPermissionsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListAccountPermissionsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListAccountPermissionsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListAccountPermissionsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListAccountPermissionsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListAccountPermissionsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListCoverageCommand.ts b/clients/client-inspector2/src/commands/ListCoverageCommand.ts new file mode 100644 index 000000000000..fcb9b3f882b1 --- /dev/null +++ b/clients/client-inspector2/src/commands/ListCoverageCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { ListCoverageRequest, 
ListCoverageResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListCoverageCommand, + serializeAws_restJson1ListCoverageCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListCoverageCommandInput extends ListCoverageRequest {} +export interface ListCoverageCommandOutput extends ListCoverageResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists coverage details for your environment.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListCoverageCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListCoverageCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListCoverageCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListCoverageCommandInput} for command's `input` shape. + * @see {@link ListCoverageCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListCoverageCommand extends $Command< + ListCoverageCommandInput, + ListCoverageCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListCoverageCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListCoverageCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListCoverageRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListCoverageResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListCoverageCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListCoverageCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListCoverageCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListCoverageStatisticsCommand.ts b/clients/client-inspector2/src/commands/ListCoverageStatisticsCommand.ts new file mode 100644 index 000000000000..04b6d8026749 --- /dev/null +++ b/clients/client-inspector2/src/commands/ListCoverageStatisticsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { ListCoverageStatisticsRequest, ListCoverageStatisticsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListCoverageStatisticsCommand, + 
serializeAws_restJson1ListCoverageStatisticsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListCoverageStatisticsCommandInput extends ListCoverageStatisticsRequest {} +export interface ListCoverageStatisticsCommandOutput extends ListCoverageStatisticsResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists Amazon Inspector coverage statistics for your environment.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListCoverageStatisticsCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListCoverageStatisticsCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListCoverageStatisticsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListCoverageStatisticsCommandInput} for command's `input` shape. + * @see {@link ListCoverageStatisticsCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListCoverageStatisticsCommand extends $Command< + ListCoverageStatisticsCommandInput, + ListCoverageStatisticsCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListCoverageStatisticsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListCoverageStatisticsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListCoverageStatisticsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListCoverageStatisticsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListCoverageStatisticsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListCoverageStatisticsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListCoverageStatisticsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListDelegatedAdminAccountsCommand.ts b/clients/client-inspector2/src/commands/ListDelegatedAdminAccountsCommand.ts new file mode 100644 index 000000000000..3adda1d6521d --- /dev/null +++ b/clients/client-inspector2/src/commands/ListDelegatedAdminAccountsCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import 
{ ListDelegatedAdminAccountsRequest, ListDelegatedAdminAccountsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListDelegatedAdminAccountsCommand, + serializeAws_restJson1ListDelegatedAdminAccountsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListDelegatedAdminAccountsCommandInput extends ListDelegatedAdminAccountsRequest {} +export interface ListDelegatedAdminAccountsCommandOutput extends ListDelegatedAdminAccountsResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists information about the Amazon Inspector delegated administrator of your organization.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListDelegatedAdminAccountsCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListDelegatedAdminAccountsCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListDelegatedAdminAccountsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListDelegatedAdminAccountsCommandInput} for command's `input` shape. + * @see {@link ListDelegatedAdminAccountsCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListDelegatedAdminAccountsCommand extends $Command< + ListDelegatedAdminAccountsCommandInput, + ListDelegatedAdminAccountsCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListDelegatedAdminAccountsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListDelegatedAdminAccountsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListDelegatedAdminAccountsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListDelegatedAdminAccountsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListDelegatedAdminAccountsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListDelegatedAdminAccountsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1ListDelegatedAdminAccountsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListFiltersCommand.ts b/clients/client-inspector2/src/commands/ListFiltersCommand.ts new file mode 100644 index 000000000000..024b3a5d0f2a --- /dev/null +++ b/clients/client-inspector2/src/commands/ListFiltersCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from 
"../Inspector2Client"; +import { ListFiltersRequest, ListFiltersResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListFiltersCommand, + serializeAws_restJson1ListFiltersCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListFiltersCommandInput extends ListFiltersRequest {} +export interface ListFiltersCommandOutput extends ListFiltersResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists the filters associated with your account.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListFiltersCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListFiltersCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListFiltersCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListFiltersCommandInput} for command's `input` shape. + * @see {@link ListFiltersCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListFiltersCommand extends $Command< + ListFiltersCommandInput, + ListFiltersCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListFiltersCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListFiltersCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListFiltersRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListFiltersResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListFiltersCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListFiltersCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListFiltersCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListFindingAggregationsCommand.ts b/clients/client-inspector2/src/commands/ListFindingAggregationsCommand.ts new file mode 100644 index 000000000000..667edadc3159 --- /dev/null +++ b/clients/client-inspector2/src/commands/ListFindingAggregationsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { ListFindingAggregationsRequest, ListFindingAggregationsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListFindingAggregationsCommand, + 
serializeAws_restJson1ListFindingAggregationsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListFindingAggregationsCommandInput extends ListFindingAggregationsRequest {} +export interface ListFindingAggregationsCommandOutput extends ListFindingAggregationsResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists aggregated finding data for your environment based on specific criteria.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListFindingAggregationsCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListFindingAggregationsCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListFindingAggregationsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListFindingAggregationsCommandInput} for command's `input` shape. + * @see {@link ListFindingAggregationsCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListFindingAggregationsCommand extends $Command< + ListFindingAggregationsCommandInput, + ListFindingAggregationsCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListFindingAggregationsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListFindingAggregationsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListFindingAggregationsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListFindingAggregationsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListFindingAggregationsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListFindingAggregationsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListFindingAggregationsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListFindingsCommand.ts b/clients/client-inspector2/src/commands/ListFindingsCommand.ts new file mode 100644 index 000000000000..9a16020d7133 --- /dev/null +++ b/clients/client-inspector2/src/commands/ListFindingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { ListFindingsRequest, 
ListFindingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListFindingsCommand, + serializeAws_restJson1ListFindingsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListFindingsCommandInput extends ListFindingsRequest {} +export interface ListFindingsCommandOutput extends ListFindingsResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists findings for your environment.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListFindingsCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListFindingsCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListFindingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListFindingsCommandInput} for command's `input` shape. + * @see {@link ListFindingsCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListFindingsCommand extends $Command< + ListFindingsCommandInput, + ListFindingsCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListFindingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListFindingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListFindingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListFindingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListFindingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListFindingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListFindingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListMembersCommand.ts b/clients/client-inspector2/src/commands/ListMembersCommand.ts new file mode 100644 index 000000000000..ff795c4924ef --- /dev/null +++ b/clients/client-inspector2/src/commands/ListMembersCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { ListMembersRequest, ListMembersResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListMembersCommand, + serializeAws_restJson1ListMembersCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface ListMembersCommandInput extends ListMembersRequest {} +export interface ListMembersCommandOutput extends ListMembersResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists members associated with the Amazon Inspector delegated administrator for your organization.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListMembersCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListMembersCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListMembersCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListMembersCommandInput} for command's `input` shape. + * @see {@link ListMembersCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListMembersCommand extends $Command< + ListMembersCommandInput, + ListMembersCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListMembersCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListMembersCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListMembersRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListMembersResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListMembersCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListMembersCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListMembersCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListTagsForResourceCommand.ts b/clients/client-inspector2/src/commands/ListTagsForResourceCommand.ts new file mode 100644 index 000000000000..83f1a1c9052d --- /dev/null +++ b/clients/client-inspector2/src/commands/ListTagsForResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { ListTagsForResourceRequest, ListTagsForResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTagsForResourceCommand, + 
serializeAws_restJson1ListTagsForResourceCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListTagsForResourceCommandInput extends ListTagsForResourceRequest {} +export interface ListTagsForResourceCommandOutput extends ListTagsForResourceResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists all tags attached to a given resource.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListTagsForResourceCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListTagsForResourceCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListTagsForResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTagsForResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTagsForResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTagsForResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTagsForResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListTagsForResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/ListUsageTotalsCommand.ts b/clients/client-inspector2/src/commands/ListUsageTotalsCommand.ts new file mode 100644 index 000000000000..b7b82bb38054 --- /dev/null +++ b/clients/client-inspector2/src/commands/ListUsageTotalsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { ListUsageTotalsRequest, ListUsageTotalsResponse } from "../models/models_0"; 
+import { + deserializeAws_restJson1ListUsageTotalsCommand, + serializeAws_restJson1ListUsageTotalsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListUsageTotalsCommandInput extends ListUsageTotalsRequest {} +export interface ListUsageTotalsCommandOutput extends ListUsageTotalsResponse, __MetadataBearer {} + +/** + *

+ * <p>Lists the Amazon Inspector usage totals over the last 30 days.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, ListUsageTotalsCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, ListUsageTotalsCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new ListUsageTotalsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListUsageTotalsCommandInput} for command's `input` shape. + * @see {@link ListUsageTotalsCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class ListUsageTotalsCommand extends $Command< + ListUsageTotalsCommandInput, + ListUsageTotalsCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListUsageTotalsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "ListUsageTotalsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListUsageTotalsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListUsageTotalsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListUsageTotalsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListUsageTotalsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListUsageTotalsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/TagResourceCommand.ts b/clients/client-inspector2/src/commands/TagResourceCommand.ts new file mode 100644 index 000000000000..e047d87cc1d9 --- /dev/null +++ b/clients/client-inspector2/src/commands/TagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { TagResourceRequest, TagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1TagResourceCommand, + 
serializeAws_restJson1TagResourceCommand, +} from "../protocols/Aws_restJson1"; + +export interface TagResourceCommandInput extends TagResourceRequest {} +export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} + +/** + *

+ * <p>Adds tags to a resource.</p>
                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, TagResourceCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, TagResourceCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "TagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: TagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: TagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1TagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1TagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/UntagResourceCommand.ts b/clients/client-inspector2/src/commands/UntagResourceCommand.ts new file mode 100644 index 000000000000..dc080fb2ad13 --- /dev/null +++ b/clients/client-inspector2/src/commands/UntagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { UntagResourceRequest, UntagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UntagResourceCommand, + serializeAws_restJson1UntagResourceCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface UntagResourceCommandInput extends UntagResourceRequest {} +export interface UntagResourceCommandOutput extends UntagResourceResponse, __MetadataBearer {} + +/** + *

                                  Removes tags from a resource.
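                                  A hedged sketch of a complete call; `resourceArn` and `tagKeys` are assumed parameter names of `UntagResourceRequest`, not shown in this hunk.

```javascript
// Hedged sketch: resourceArn and tagKeys are assumed parameter names.
import { Inspector2Client, UntagResourceCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });
await client.send(
  new UntagResourceCommand({
    resourceArn: "arn:aws:inspector2:us-east-1:111122223333:filter/example", // assumed ARN format
    tagKeys: ["team"],
  })
);
```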

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, UntagResourceCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, UntagResourceCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "UntagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UntagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: UntagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UntagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UntagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UntagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/UpdateFilterCommand.ts b/clients/client-inspector2/src/commands/UpdateFilterCommand.ts new file mode 100644 index 000000000000..4f95ede7d332 --- /dev/null +++ b/clients/client-inspector2/src/commands/UpdateFilterCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { UpdateFilterRequest, UpdateFilterResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateFilterCommand, + serializeAws_restJson1UpdateFilterCommand, +} 
from "../protocols/Aws_restJson1"; + +export interface UpdateFilterCommandInput extends UpdateFilterRequest {} +export interface UpdateFilterCommandOutput extends UpdateFilterResponse, __MetadataBearer {} + +/** + *

                                  Specifies the action that is to be applied to the findings that match the filter.
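                                  A hedged sketch of updating a filter; `filterArn`, `action`, and `description` are assumed field names of `UpdateFilterRequest`, and `"SUPPRESS"` is an assumed action value, none of which appear in this hunk.

```javascript
// Hedged sketch: filterArn, action, and description are assumed UpdateFilterRequest field names.
import { Inspector2Client, UpdateFilterCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });
await client.send(
  new UpdateFilterCommand({
    filterArn: "arn:aws:inspector2:us-east-1:111122223333:owner/111122223333/filter/example", // assumed
    action: "SUPPRESS", // assumed value; the action controls how matching findings are treated
    description: "Suppress findings for the sandbox repository",
  })
);
```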

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, UpdateFilterCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, UpdateFilterCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new UpdateFilterCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateFilterCommandInput} for command's `input` shape. + * @see {@link UpdateFilterCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class UpdateFilterCommand extends $Command< + UpdateFilterCommandInput, + UpdateFilterCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateFilterCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "UpdateFilterCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateFilterRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateFilterResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateFilterCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateFilterCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateFilterCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/UpdateOrganizationConfigurationCommand.ts b/clients/client-inspector2/src/commands/UpdateOrganizationConfigurationCommand.ts new file mode 100644 index 000000000000..b7205966acf5 --- /dev/null +++ b/clients/client-inspector2/src/commands/UpdateOrganizationConfigurationCommand.ts @@ -0,0 +1,103 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { Inspector2ClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../Inspector2Client"; +import { UpdateOrganizationConfigurationRequest, UpdateOrganizationConfigurationResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1UpdateOrganizationConfigurationCommand, + serializeAws_restJson1UpdateOrganizationConfigurationCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateOrganizationConfigurationCommandInput extends UpdateOrganizationConfigurationRequest {} +export interface UpdateOrganizationConfigurationCommandOutput + extends UpdateOrganizationConfigurationResponse, + __MetadataBearer {} + +/** + *

                                  Updates the configurations for your Amazon Inspector organization.
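                                  A hedged sketch of the call; the `autoEnable` field and its `{ ec2, ecr }` shape are assumptions inferred from the resource types this client scans, not shown in this hunk.

```javascript
// Hedged sketch: autoEnable and its ec2/ecr members are assumed request fields.
import { Inspector2Client, UpdateOrganizationConfigurationCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });
await client.send(
  new UpdateOrganizationConfigurationCommand({
    autoEnable: { ec2: true, ecr: true }, // assumed: auto-enable scanning for new member accounts
  })
);
```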

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { Inspector2Client, UpdateOrganizationConfigurationCommand } from "@aws-sdk/client-inspector2"; // ES Modules import + * // const { Inspector2Client, UpdateOrganizationConfigurationCommand } = require("@aws-sdk/client-inspector2"); // CommonJS import + * const client = new Inspector2Client(config); + * const command = new UpdateOrganizationConfigurationCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateOrganizationConfigurationCommandInput} for command's `input` shape. + * @see {@link UpdateOrganizationConfigurationCommandOutput} for command's `response` shape. + * @see {@link Inspector2ClientResolvedConfig | config} for Inspector2Client's `config` shape. + * + */ +export class UpdateOrganizationConfigurationCommand extends $Command< + UpdateOrganizationConfigurationCommandInput, + UpdateOrganizationConfigurationCommandOutput, + Inspector2ClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateOrganizationConfigurationCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: Inspector2ClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "Inspector2Client"; + const commandName = "UpdateOrganizationConfigurationCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateOrganizationConfigurationRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateOrganizationConfigurationResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: UpdateOrganizationConfigurationCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateOrganizationConfigurationCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1UpdateOrganizationConfigurationCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-inspector2/src/commands/index.ts b/clients/client-inspector2/src/commands/index.ts new file mode 100644 index 000000000000..21f87c83246e --- /dev/null +++ b/clients/client-inspector2/src/commands/index.ts @@ -0,0 +1,30 @@ +export * from "./AssociateMemberCommand"; +export * from "./BatchGetAccountStatusCommand"; +export * from "./BatchGetFreeTrialInfoCommand"; +export * from "./CancelFindingsReportCommand"; +export * from "./CreateFilterCommand"; +export * from "./CreateFindingsReportCommand"; +export * from "./DeleteFilterCommand"; +export * from "./DescribeOrganizationConfigurationCommand"; +export * from "./DisableCommand"; +export * from "./DisableDelegatedAdminAccountCommand"; +export * from "./DisassociateMemberCommand"; 
+export * from "./EnableCommand"; +export * from "./EnableDelegatedAdminAccountCommand"; +export * from "./GetDelegatedAdminAccountCommand"; +export * from "./GetFindingsReportStatusCommand"; +export * from "./GetMemberCommand"; +export * from "./ListAccountPermissionsCommand"; +export * from "./ListCoverageCommand"; +export * from "./ListCoverageStatisticsCommand"; +export * from "./ListDelegatedAdminAccountsCommand"; +export * from "./ListFiltersCommand"; +export * from "./ListFindingAggregationsCommand"; +export * from "./ListFindingsCommand"; +export * from "./ListMembersCommand"; +export * from "./ListTagsForResourceCommand"; +export * from "./ListUsageTotalsCommand"; +export * from "./TagResourceCommand"; +export * from "./UntagResourceCommand"; +export * from "./UpdateFilterCommand"; +export * from "./UpdateOrganizationConfigurationCommand"; diff --git a/clients/client-inspector2/src/endpoints.ts b/clients/client-inspector2/src/endpoints.ts new file mode 100644 index 000000000000..3badf56c0456 --- /dev/null +++ b/clients/client-inspector2/src/endpoints.ts @@ -0,0 +1,134 @@ +import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; +import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; + +const regionHash: RegionHash = {}; + +const partitionHash: PartitionHash = { + aws: { + regions: [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ], + regionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "inspector2.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "inspector2-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "inspector2-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "inspector2.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, + "aws-cn": { + regions: ["cn-north-1", "cn-northwest-1"], + regionRegex: "^cn\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "inspector2.{region}.amazonaws.com.cn", + tags: [], + }, + { + hostname: "inspector2-fips.{region}.amazonaws.com.cn", + tags: ["fips"], + }, + { + hostname: "inspector2-fips.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack", "fips"], + }, + { + hostname: "inspector2.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "aws-iso": { + regions: ["us-iso-east-1", "us-iso-west-1"], + regionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "inspector2.{region}.c2s.ic.gov", + tags: [], + }, + { + hostname: "inspector2-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, + ], + }, + "aws-iso-b": { + regions: ["us-isob-east-1"], + regionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "inspector2.{region}.sc2s.sgov.gov", + tags: [], + }, + { + hostname: "inspector2-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, + ], + }, + "aws-us-gov": { + regions: ["us-gov-east-1", "us-gov-west-1"], + regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "inspector2.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "inspector2-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "inspector2-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "inspector2.{region}.api.aws", + 
tags: ["dualstack"], + }, + ], + }, +}; + +export const defaultRegionInfoProvider: RegionInfoProvider = async ( + region: string, + options?: RegionInfoProviderOptions +) => + getRegionInfo(region, { + ...options, + signingService: "inspector2", + regionHash, + partitionHash, + }); diff --git a/clients/client-inspector2/src/index.ts b/clients/client-inspector2/src/index.ts new file mode 100644 index 000000000000..270dabdbb80f --- /dev/null +++ b/clients/client-inspector2/src/index.ts @@ -0,0 +1,5 @@ +export * from "./Inspector2"; +export * from "./Inspector2Client"; +export * from "./commands"; +export * from "./models"; +export * from "./pagination"; diff --git a/clients/client-inspector2/src/models/index.ts b/clients/client-inspector2/src/models/index.ts new file mode 100644 index 000000000000..09c5d6e09b8c --- /dev/null +++ b/clients/client-inspector2/src/models/index.ts @@ -0,0 +1 @@ +export * from "./models_0"; diff --git a/clients/client-inspector2/src/models/models_0.ts b/clients/client-inspector2/src/models/models_0.ts new file mode 100644 index 000000000000..53013e0de6db --- /dev/null +++ b/clients/client-inspector2/src/models/models_0.ts @@ -0,0 +1,4850 @@ +import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; + +/** + *
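The partition table in endpoints.ts above enumerates standard, "fips", and "dualstack" hostname variants per partition. As a hedged illustration only: the `useFipsEndpoint` / `useDualstackEndpoint` client config keys are assumed (consistent with other SDK v3 clients in this changeset) and are not part of this hunk.

```javascript
// Hedged sketch: useFipsEndpoint / useDualstackEndpoint are assumed config keys that select
// the "fips" / "dualstack" hostname variants listed in endpoints.ts above.
import { Inspector2Client, ListFiltersCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({
  region: "us-gov-west-1",
  useFipsEndpoint: true, // assumed: resolves to inspector2-fips.us-gov-west-1.amazonaws.com
  useDualstackEndpoint: false,
});
const response = await client.send(new ListFiltersCommand({})); // empty input assumed to be accepted
console.log(response.$metadata.httpStatusCode);
```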

                                  You do not have sufficient access to perform this action.

                                  + */ +export interface AccessDeniedException extends __SmithyException, $MetadataBearer { + name: "AccessDeniedException"; + $fault: "client"; + message: string | undefined; +} + +export namespace AccessDeniedException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccessDeniedException): any => ({ + ...obj, + }); +} + +export enum Status { + DISABLED = "DISABLED", + DISABLING = "DISABLING", + ENABLED = "ENABLED", + ENABLING = "ENABLING", + SUSPENDED = "SUSPENDED", + SUSPENDING = "SUSPENDING", +} + +/** + *

                                  Details the status of Amazon Inspector for each resource type Amazon Inspector scans.

                                  + */ +export interface ResourceStatus { + /** + *

                                  The status of Amazon Inspector scanning for Amazon EC2 resources.

                                  + */ + ec2: Status | string | undefined; + + /** + *

                                  The status of Amazon Inspector scanning for Amazon ECR resources.

                                  + */ + ecr: Status | string | undefined; +} + +export namespace ResourceStatus { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceStatus): any => ({ + ...obj, + }); +} + +/** + *

                                  An Amazon Web Services account within your environment that Amazon Inspector has been enabled for.

                                  + */ +export interface Account { + /** + *

                                  The ID of the Amazon Web Services account.

                                  + */ + accountId: string | undefined; + + /** + *

                                  The status of Amazon Inspector for the account.

                                  + */ + status: Status | string | undefined; + + /** + *

                                  Details of the status of Amazon Inspector scans by resource type.

                                  + */ + resourceStatus: ResourceStatus | undefined; +} + +export namespace Account { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Account): any => ({ + ...obj, + }); +} + +export enum AggregationFindingType { + NETWORK_REACHABILITY = "NETWORK_REACHABILITY", + PACKAGE_VULNERABILITY = "PACKAGE_VULNERABILITY", +} + +export enum AggregationResourceType { + AWS_EC2_INSTANCE = "AWS_EC2_INSTANCE", + AWS_ECR_CONTAINER_IMAGE = "AWS_ECR_CONTAINER_IMAGE", +} + +export enum AccountSortBy { + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", +} + +export enum SortOrder { + ASC = "ASC", + DESC = "DESC", +} + +/** + *

                                  An object that contains details about an aggregation response based on Amazon Web Services accounts.

                                  + */ +export interface AccountAggregation { + /** + *

                                  The type of finding.

                                  + */ + findingType?: AggregationFindingType | string; + + /** + *

                                  The type of resource.

                                  + */ + resourceType?: AggregationResourceType | string; + + /** + *

                                  The sort order (ascending or descending).

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort by.

                                  + */ + sortBy?: AccountSortBy | string; +} + +export namespace AccountAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccountAggregation): any => ({ + ...obj, + }); +} + +/** + *

                                  An object that contains the counts of aggregated finding per severity.

                                  + */ +export interface SeverityCounts { + /** + *

                                  The total count of findings from all severities.

                                  + */ + all?: number; + + /** + *

                                  The total count of medium severity findings.

                                  + */ + medium?: number; + + /** + *

                                  The total count of high severity findings.

                                  + */ + high?: number; + + /** + *

                                  The total count of critical severity findings.

                                  + */ + critical?: number; +} + +export namespace SeverityCounts { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SeverityCounts): any => ({ + ...obj, + }); +} + +/** + *

                                  An aggregation of findings by Amazon Web Services account ID.

                                  + */ +export interface AccountAggregationResponse { + /** + *

                                  The Amazon Web Services account ID.

                                  + */ + accountId?: string; + + /** + *

                                  The number of findings by severity.

                                  + */ + severityCounts?: SeverityCounts; +} + +export namespace AccountAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccountAggregationResponse): any => ({ + ...obj, + }); +} + +export enum ErrorCode { + ACCESS_DENIED = "ACCESS_DENIED", + ALREADY_ENABLED = "ALREADY_ENABLED", + DISABLE_IN_PROGRESS = "DISABLE_IN_PROGRESS", + DISASSOCIATE_ALL_MEMBERS = "DISASSOCIATE_ALL_MEMBERS", + ENABLE_IN_PROGRESS = "ENABLE_IN_PROGRESS", + EVENTBRIDGE_THROTTLED = "EVENTBRIDGE_THROTTLED", + EVENTBRIDGE_UNAVAILABLE = "EVENTBRIDGE_UNAVAILABLE", + INTERNAL_ERROR = "INTERNAL_ERROR", + RESOURCE_NOT_FOUND = "RESOURCE_NOT_FOUND", + RESOURCE_SCAN_NOT_DISABLED = "RESOURCE_SCAN_NOT_DISABLED", + SSM_THROTTLED = "SSM_THROTTLED", + SSM_UNAVAILABLE = "SSM_UNAVAILABLE", + SUSPEND_IN_PROGRESS = "SUSPEND_IN_PROGRESS", +} + +/** + *

                                  An object that describes the state of Amazon Inspector scans for an account.

                                  + */ +export interface State { + /** + *

                                  The status of Amazon Inspector for the account.

                                  + */ + status: Status | string | undefined; + + /** + *

                                  The error code explaining why the account failed to enable Amazon Inspector.

                                  + */ + errorCode: ErrorCode | string | undefined; + + /** + *

                                  The error message received when the account failed to enable Amazon Inspector.

                                  + */ + errorMessage: string | undefined; +} + +export namespace State { + /** + * @internal + */ + export const filterSensitiveLog = (obj: State): any => ({ + ...obj, + }); +} + +/** + *

                                  Details the state of Amazon Inspector for each resource type Amazon Inspector scans.

                                  + */ +export interface ResourceState { + /** + *

                                  An object detailing the state of Amazon Inspector scanning for Amazon EC2 resources.

                                  + */ + ec2: State | undefined; + + /** + *

                                  An object detailing the state of Amazon Inspector scanning for Amazon ECR resources.

                                  + */ + ecr: State | undefined; +} + +export namespace ResourceState { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceState): any => ({ + ...obj, + }); +} + +/** + *

                                  An object with details on the status of an Amazon Web Services account within your Amazon Inspector environment.
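                                  Structures of this shape are returned by `BatchGetAccountStatusCommand` (exported above); a hedged sketch of reading them. The `accountIds` input and `accounts` output field names are assumptions not shown in this hunk.

```javascript
// Hedged sketch: accountIds (input) and accounts (output) are assumed field names.
import { Inspector2Client, BatchGetAccountStatusCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });
const { accounts } = await client.send(new BatchGetAccountStatusCommand({ accountIds: ["111122223333"] }));
for (const account of accounts ?? []) {
  // AccountState carries accountId, an overall State, and a ResourceState with per-resource-type State objects.
  console.log(account.accountId, account.state?.status, account.resourceState?.ec2?.status);
}
```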

                                  + */ +export interface AccountState { + /** + *

                                  The Amazon Web Services account ID.

                                  + */ + accountId: string | undefined; + + /** + *

                                  An object detailing the status of Amazon Inspector for the account.

                                  + */ + state: State | undefined; + + /** + *

                                  An object detailing which resources Amazon Inspector is enabled to scan for the account.

                                  + */ + resourceState: ResourceState | undefined; +} + +export namespace AccountState { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccountState): any => ({ + ...obj, + }); +} + +export enum StringComparison { + EQUALS = "EQUALS", + NOT_EQUALS = "NOT_EQUALS", + PREFIX = "PREFIX", +} + +/** + *

                                  An object that describes the details of a string filter.
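                                  A minimal sketch, assuming nothing beyond the shapes in this file: `StringFilter` objects are plain `{ comparison, value }` pairs whose `comparison` comes from the `StringComparison` enum above, and they are composed into aggregations such as `AmiAggregation` (defined later in this file).

```javascript
// StringFilter: comparison values come from the StringComparison enum above (EQUALS, NOT_EQUALS, PREFIX).
const amiFilter = { comparison: "PREFIX", value: "ami-0abc" };

// Composed into an aggregation such as AmiAggregation (defined later in this file):
const amiAggregation = {
  amis: [amiFilter],
  sortOrder: "DESC",
  sortBy: "AFFECTED_INSTANCES",
};
```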

                                  + */ +export interface StringFilter { + /** + *

                                  The operator to use when comparing values in the filter.

                                  + */ + comparison: StringComparison | string | undefined; + + /** + *

                                  The value to filter on.

                                  + */ + value: string | undefined; +} + +export namespace StringFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StringFilter): any => ({ + ...obj, + }); +} + +export enum AmiSortBy { + AFFECTED_INSTANCES = "AFFECTED_INSTANCES", + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", +} + +/** + *

                                  The details that define an aggregation based on Amazon Machine Images (AMIs).

                                  + */ +export interface AmiAggregation { + /** + *

                                  The IDs of AMIs to aggregate findings for.

                                  + */ + amis?: StringFilter[]; + + /** + *

                                  The order to sort results by.

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort results by.

                                  + */ + sortBy?: AmiSortBy | string; +} + +export namespace AmiAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AmiAggregation): any => ({ + ...obj, + }); +} + +export enum AwsEcrContainerSortBy { + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", +} + +/** + *

                                  An aggregation of information about Amazon ECR containers.

                                  + */ +export interface AwsEcrContainerAggregation { + /** + *

                                  The container resource IDs.

                                  + */ + resourceIds?: StringFilter[]; + + /** + *

                                  The image SHA values.

                                  + */ + imageShas?: StringFilter[]; + + /** + *

                                  The container repositories.

                                  + */ + repositories?: StringFilter[]; + + /** + *

                                  The architecture of the containers.

                                  + */ + architectures?: StringFilter[]; + + /** + *

                                  The image tags.

                                  + */ + imageTags?: StringFilter[]; + + /** + *

                                  The sort order (ascending or descending).

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort by.

                                  + */ + sortBy?: AwsEcrContainerSortBy | string; +} + +export namespace AwsEcrContainerAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AwsEcrContainerAggregation): any => ({ + ...obj, + }); +} + +export enum MapComparison { + EQUALS = "EQUALS", +} + +/** + *

                                  An object that describes details of a map filter.
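                                  A minimal sketch of a tag-based map filter, using only shapes from this file: `MapComparison` currently defines only `EQUALS`, and map filters feed aggregations such as `Ec2InstanceAggregation` (defined below).

```javascript
// MapFilter: MapComparison currently only defines EQUALS (see enum above).
const tagFilter = { comparison: "EQUALS", key: "environment", value: "production" };

// e.g. aggregate EC2 findings for instances carrying that tag (Ec2InstanceAggregation, defined below):
const ec2Aggregation = { instanceTags: [tagFilter], sortBy: "NETWORK_FINDINGS", sortOrder: "DESC" };
```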

                                  + */ +export interface MapFilter { + /** + *

                                  The operator to use when comparing values in the filter.

                                  + */ + comparison: MapComparison | string | undefined; + + /** + *

                                  The tag key used in the filter.

                                  + */ + key: string | undefined; + + /** + *

                                  The tag value used in the filter.

                                  + */ + value?: string; +} + +export namespace MapFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: MapFilter): any => ({ + ...obj, + }); +} + +export enum Ec2InstanceSortBy { + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", + NETWORK_FINDINGS = "NETWORK_FINDINGS", +} + +/** + *

                                  The details that define an aggregation based on Amazon EC2 instances.

                                  + */ +export interface Ec2InstanceAggregation { + /** + *

                                  The AMI IDs associated with the Amazon EC2 instances to aggregate findings for.

                                  + */ + amis?: StringFilter[]; + + /** + *

                                  The operating system types to aggregate findings for. Valid values must be uppercase and underscore-separated; examples are ORACLE_LINUX_7 and ALPINE_LINUX_3_8.

                                  + */ + operatingSystems?: StringFilter[]; + + /** + *

                                  The Amazon EC2 instance IDs to aggregate findings for.

                                  + */ + instanceIds?: StringFilter[]; + + /** + *

                                  The Amazon EC2 instance tags to aggregate findings for.

                                  + */ + instanceTags?: MapFilter[]; + + /** + *

                                  The order to sort results by.

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort results by.

                                  + */ + sortBy?: Ec2InstanceSortBy | string; +} + +export namespace Ec2InstanceAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Ec2InstanceAggregation): any => ({ + ...obj, + }); +} + +export enum FindingTypeSortBy { + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", +} + +/** + *

                                  The details that define an aggregation based on finding type.

                                  + */ +export interface FindingTypeAggregation { + /** + *

                                  The finding type to aggregate.

                                  + */ + findingType?: AggregationFindingType | string; + + /** + *

                                  The resource type to aggregate.

                                  + */ + resourceType?: AggregationResourceType | string; + + /** + *

                                  The order to sort results by.

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort results by.

                                  + */ + sortBy?: FindingTypeSortBy | string; +} + +export namespace FindingTypeAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FindingTypeAggregation): any => ({ + ...obj, + }); +} + +export enum ImageLayerSortBy { + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", +} + +/** + *

                                  The details that define an aggregation based on container image layers.

                                  + */ +export interface ImageLayerAggregation { + /** + *

                                  The repository associated with the container image hosting the layers.

                                  + */ + repositories?: StringFilter[]; + + /** + *

                                  The ID of the container image layer.

                                  + */ + resourceIds?: StringFilter[]; + + /** + *

                                  The hashes associated with the layers.

                                  + */ + layerHashes?: StringFilter[]; + + /** + *

                                  The order to sort results by.

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort results by.

                                  + */ + sortBy?: ImageLayerSortBy | string; +} + +export namespace ImageLayerAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImageLayerAggregation): any => ({ + ...obj, + }); +} + +export enum PackageSortBy { + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", +} + +/** + *

                                  The details that define an aggregation based on operating system package type.

                                  + */ +export interface PackageAggregation { + /** + *

                                  The names of packages to aggregate findings on.

                                  + */ + packageNames?: StringFilter[]; + + /** + *

                                  The order to sort results by.

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort results by.

                                  + */ + sortBy?: PackageSortBy | string; +} + +export namespace PackageAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PackageAggregation): any => ({ + ...obj, + }); +} + +export enum RepositorySortBy { + AFFECTED_IMAGES = "AFFECTED_IMAGES", + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", +} + +/** + *

                                  The details that define an aggregation based on repository.

                                  + */ +export interface RepositoryAggregation { + /** + *

                                  The names of repositories to aggregate findings on.

                                  + */ + repositories?: StringFilter[]; + + /** + *

                                  The order to sort results by.

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort results by.

                                  + */ + sortBy?: RepositorySortBy | string; +} + +export namespace RepositoryAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RepositoryAggregation): any => ({ + ...obj, + }); +} + +export enum TitleSortBy { + ALL = "ALL", + CRITICAL = "CRITICAL", + HIGH = "HIGH", +} + +/** + *

                                  The details that define an aggregation based on finding title.

                                  + */ +export interface TitleAggregation { + /** + *

                                  The finding titles to aggregate on.

                                  + */ + titles?: StringFilter[]; + + /** + *

                                  The vulnerability IDs of the findings.

                                  + */ + vulnerabilityIds?: StringFilter[]; + + /** + *

                                  The resource type to aggregate on.

                                  + */ + resourceType?: AggregationResourceType | string; + + /** + *

                                  The order to sort results by.

                                  + */ + sortOrder?: SortOrder | string; + + /** + *

                                  The value to sort results by.

                                  + */ + sortBy?: TitleSortBy | string; +} + +export namespace TitleAggregation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TitleAggregation): any => ({ + ...obj, + }); +} + +/** + *

                                  Contains details about an aggregation request.
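                                  `AggregationRequest` is a tagged union: exactly one member may be set per request. A hedged sketch of passing one to `ListFindingAggregationsCommand` (exported above); the `aggregationType` and `aggregationRequest` parameter names and the `"REPOSITORY"` value are assumptions not shown in this hunk.

```javascript
// Hedged sketch: aggregationType and aggregationRequest are assumed ListFindingAggregations parameters.
import { Inspector2Client, ListFindingAggregationsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });
const response = await client.send(
  new ListFindingAggregationsCommand({
    aggregationType: "REPOSITORY", // assumed enum value
    aggregationRequest: {
      // Tagged union: set exactly one member (here, a RepositoryAggregation).
      repositoryAggregation: {
        repositories: [{ comparison: "PREFIX", value: "team-a/" }],
        sortBy: "CRITICAL",
        sortOrder: "DESC",
      },
    },
  })
);
console.log(response.$metadata.httpStatusCode);
```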

                                  + */ +export type AggregationRequest = + | AggregationRequest.AccountAggregationMember + | AggregationRequest.AmiAggregationMember + | AggregationRequest.AwsEcrContainerAggregationMember + | AggregationRequest.Ec2InstanceAggregationMember + | AggregationRequest.FindingTypeAggregationMember + | AggregationRequest.ImageLayerAggregationMember + | AggregationRequest.PackageAggregationMember + | AggregationRequest.RepositoryAggregationMember + | AggregationRequest.TitleAggregationMember + | AggregationRequest.$UnknownMember; + +export namespace AggregationRequest { + /** + *

                                  An object that contains details about an aggregation request based on Amazon Web Services account IDs.

                                  + */ + export interface AccountAggregationMember { + accountAggregation: AccountAggregation; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation request based on Amazon Machine Images (AMIs).

                                  + */ + export interface AmiAggregationMember { + accountAggregation?: never; + amiAggregation: AmiAggregation; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation request based on Amazon ECR container images.

                                  + */ + export interface AwsEcrContainerAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation: AwsEcrContainerAggregation; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation request based on Amazon EC2 instances.

                                  + */ + export interface Ec2InstanceAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation: Ec2InstanceAggregation; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation request based on finding types.

                                  + */ + export interface FindingTypeAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation: FindingTypeAggregation; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation request based on container image layers.

                                  + */ + export interface ImageLayerAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation: ImageLayerAggregation; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation request based on operating system package type.

                                  + */ + export interface PackageAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation: PackageAggregation; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation request based on Amazon ECR repositories.

                                  + */ + export interface RepositoryAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation: RepositoryAggregation; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation request based on finding title.

                                  + */ + export interface TitleAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation: TitleAggregation; + $unknown?: never; + } + + export interface $UnknownMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown: [string, any]; + } + + export interface Visitor { + accountAggregation: (value: AccountAggregation) => T; + amiAggregation: (value: AmiAggregation) => T; + awsEcrContainerAggregation: (value: AwsEcrContainerAggregation) => T; + ec2InstanceAggregation: (value: Ec2InstanceAggregation) => T; + findingTypeAggregation: (value: FindingTypeAggregation) => T; + imageLayerAggregation: (value: ImageLayerAggregation) => T; + packageAggregation: (value: PackageAggregation) => T; + repositoryAggregation: (value: RepositoryAggregation) => T; + titleAggregation: (value: TitleAggregation) => T; + _: (name: string, value: any) => T; + } + + export const visit = (value: AggregationRequest, visitor: Visitor): T => { + if (value.accountAggregation !== undefined) return visitor.accountAggregation(value.accountAggregation); + if (value.amiAggregation !== undefined) return visitor.amiAggregation(value.amiAggregation); + if (value.awsEcrContainerAggregation !== undefined) + return visitor.awsEcrContainerAggregation(value.awsEcrContainerAggregation); + if (value.ec2InstanceAggregation !== undefined) return visitor.ec2InstanceAggregation(value.ec2InstanceAggregation); + if (value.findingTypeAggregation !== undefined) return visitor.findingTypeAggregation(value.findingTypeAggregation); + if (value.imageLayerAggregation !== undefined) return visitor.imageLayerAggregation(value.imageLayerAggregation); + if (value.packageAggregation !== undefined) return visitor.packageAggregation(value.packageAggregation); + if (value.repositoryAggregation !== undefined) return visitor.repositoryAggregation(value.repositoryAggregation); + if (value.titleAggregation !== undefined) return visitor.titleAggregation(value.titleAggregation); + return visitor._(value.$unknown[0], value.$unknown[1]); + }; + + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregationRequest): any => { + if (obj.accountAggregation !== undefined) + return { accountAggregation: AccountAggregation.filterSensitiveLog(obj.accountAggregation) }; + if (obj.amiAggregation !== undefined) + return { amiAggregation: AmiAggregation.filterSensitiveLog(obj.amiAggregation) }; + if (obj.awsEcrContainerAggregation !== undefined) + return { + awsEcrContainerAggregation: AwsEcrContainerAggregation.filterSensitiveLog(obj.awsEcrContainerAggregation), + }; + if (obj.ec2InstanceAggregation !== undefined) + return { ec2InstanceAggregation: Ec2InstanceAggregation.filterSensitiveLog(obj.ec2InstanceAggregation) }; + if (obj.findingTypeAggregation !== undefined) + return { findingTypeAggregation: FindingTypeAggregation.filterSensitiveLog(obj.findingTypeAggregation) }; + if (obj.imageLayerAggregation !== undefined) + return { imageLayerAggregation: ImageLayerAggregation.filterSensitiveLog(obj.imageLayerAggregation) }; + if (obj.packageAggregation 
!== undefined) + return { packageAggregation: PackageAggregation.filterSensitiveLog(obj.packageAggregation) }; + if (obj.repositoryAggregation !== undefined) + return { repositoryAggregation: RepositoryAggregation.filterSensitiveLog(obj.repositoryAggregation) }; + if (obj.titleAggregation !== undefined) + return { titleAggregation: TitleAggregation.filterSensitiveLog(obj.titleAggregation) }; + if (obj.$unknown !== undefined) return { [obj.$unknown[0]]: "UNKNOWN" }; + }; +} + +/** + *

                                  A response that contains the results of a finding aggregation by AMI.

                                  + */ +export interface AmiAggregationResponse { + /** + *

                                  The ID of the AMI that findings were aggregated for.

                                  + */ + ami: string | undefined; + + /** + *

                                  The Amazon Web Services account ID that the AMI belongs to.

                                  + */ + accountId?: string; + + /** + *

                                  An object that contains the count of matched findings per severity.

                                  + */ + severityCounts?: SeverityCounts; + + /** + *

                                  The number of Amazon EC2 instances using this AMI.

                                  + */ + affectedInstances?: number; +} + +export namespace AmiAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AmiAggregationResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  An aggregation of information about Amazon ECR containers.

                                  + */ +export interface AwsEcrContainerAggregationResponse { + /** + *

                                  The resource ID of the container.

                                  + */ + resourceId: string | undefined; + + /** + *

                                  The SHA value of the container image.

                                  + */ + imageSha?: string; + + /** + *

                                  The container repository.

                                  + */ + repository?: string; + + /** + *

                                  The architecture of the container.

                                  + */ + architecture?: string; + + /** + *

                                  The container image tags.

                                  + */ + imageTags?: string[]; + + /** + *

                                  The Amazon Web Services account ID of the account that owns the container.

                                  + */ + accountId?: string; + + /** + *

                                  The number of findings by severity.

                                  + */ + severityCounts?: SeverityCounts; +} + +export namespace AwsEcrContainerAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AwsEcrContainerAggregationResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  A response that contains the results of a finding aggregation by Amazon EC2 instance.

                                  + */ +export interface Ec2InstanceAggregationResponse { + /** + *

                                  The Amazon EC2 instance ID.

                                  + */ + instanceId: string | undefined; + + /** + *

                                  The Amazon Machine Image (AMI) of the Amazon EC2 instance.

                                  + */ + ami?: string; + + /** + *

                                  The operating system of the Amazon EC2 instance.

                                  + */ + operatingSystem?: string; + + /** + *

                                  The tags attached to the instance.

                                  + */ + instanceTags?: { [key: string]: string }; + + /** + *

                                  The Amazon Web Services account the Amazon EC2 instance belongs to.

                                  + */ + accountId?: string; + + /** + *

                                  An object that contains the count of matched findings per severity.

                                  + */ + severityCounts?: SeverityCounts; + + /** + *

                                  The number of network findings for the Amazon EC2 instance.

                                  + */ + networkFindings?: number; +} + +export namespace Ec2InstanceAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Ec2InstanceAggregationResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  A response that contains the results of a finding type aggregation.

                                  + */ +export interface FindingTypeAggregationResponse { + /** + *

                                  The ID of the Amazon Web Services account associated with the findings.

                                  + */ + accountId?: string; + + /** + *

                                  An object that contains the count of matched findings per severity.

                                  + */ + severityCounts?: SeverityCounts; +} + +export namespace FindingTypeAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FindingTypeAggregationResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  A response that contains the results of a finding aggregation by image layer.

                                  + */ +export interface ImageLayerAggregationResponse { + /** + *

                                  The repository the layer resides in.

                                  + */ + repository: string | undefined; + + /** + *

                                  The resource ID of the container image layer.

                                  + */ + resourceId: string | undefined; + + /** + *

                                  The layer hash.

                                  + */ + layerHash: string | undefined; + + /** + *

                                  The ID of the Amazon Web Services account that owns the container image hosting the layer image.

                                  + */ + accountId: string | undefined; + + /** + *

                                  An object that represents the count of matched findings per severity.

                                  + */ + severityCounts?: SeverityCounts; +} + +export namespace ImageLayerAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ImageLayerAggregationResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  A response that contains the results of a finding aggregation by operating system package.

                                  + */ +export interface PackageAggregationResponse { + /** + *

                                  The name of the operating system package.

                                  + */ + packageName: string | undefined; + + /** + *

                                  The ID of the Amazon Web Services account associated with the findings.

                                  + */ + accountId?: string; + + /** + *

                                  An object that contains the count of matched findings per severity.

                                  + */ + severityCounts?: SeverityCounts; +} + +export namespace PackageAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PackageAggregationResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  A response that contains details on the results of a finding aggregation by repository.

                                  + */ +export interface RepositoryAggregationResponse { + /** + *

                                  The name of the repository associated with the findings.

                                  + */ + repository: string | undefined; + + /** + *

                                  The ID of the Amazon Web Services account associated with the findings.

                                  + */ + accountId?: string; + + /** + *

                                  An object that represents the count of matched findings per severity.

                                  + */ + severityCounts?: SeverityCounts; + + /** + *

                                  The number of container images impacted by the findings.

                                  + */ + affectedImages?: number; +} + +export namespace RepositoryAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RepositoryAggregationResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  A response that contains details on the results of a finding aggregation by title.

                                  + */ +export interface TitleAggregationResponse { + /** + *

                                  The title that the findings were aggregated on.

                                  + */ + title: string | undefined; + + /** + *

                                  The vulnerability ID of the finding.

                                  + */ + vulnerabilityId?: string; + + /** + *

                                  The ID of the Amazon Web Services account associated with the findings.

                                  + */ + accountId?: string; + + /** + *

                                  An object that represents the count of matched findings per severity.

                                  + */ + severityCounts?: SeverityCounts; +} + +export namespace TitleAggregationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TitleAggregationResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains details about the results of an aggregation type.

                                  + */ +export type AggregationResponse = + | AggregationResponse.AccountAggregationMember + | AggregationResponse.AmiAggregationMember + | AggregationResponse.AwsEcrContainerAggregationMember + | AggregationResponse.Ec2InstanceAggregationMember + | AggregationResponse.FindingTypeAggregationMember + | AggregationResponse.ImageLayerAggregationMember + | AggregationResponse.PackageAggregationMember + | AggregationResponse.RepositoryAggregationMember + | AggregationResponse.TitleAggregationMember + | AggregationResponse.$UnknownMember; + +export namespace AggregationResponse { + /** + *

                                  An object that contains details about an aggregation response based on Amazon Web Services account IDs.

                                  + */ + export interface AccountAggregationMember { + accountAggregation: AccountAggregationResponse; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation response based on Amazon Machine Images (AMIs).

                                  + */ + export interface AmiAggregationMember { + accountAggregation?: never; + amiAggregation: AmiAggregationResponse; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation response based on Amazon ECR container images.

                                  + */ + export interface AwsEcrContainerAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation: AwsEcrContainerAggregationResponse; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation response based on Amazon EC2 + * instances.

                                  + */ + export interface Ec2InstanceAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation: Ec2InstanceAggregationResponse; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation response based on finding types.

                                  + */ + export interface FindingTypeAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation: FindingTypeAggregationResponse; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation response based on container image + * layers.

                                  + */ + export interface ImageLayerAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation: ImageLayerAggregationResponse; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation response based on operating system + * package type.

                                  + */ + export interface PackageAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation: PackageAggregationResponse; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation response based on Amazon ECR + * repositories.

                                  + */ + export interface RepositoryAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation: RepositoryAggregationResponse; + titleAggregation?: never; + $unknown?: never; + } + + /** + *

                                  An object that contains details about an aggregation response based on finding title.

                                  + */ + export interface TitleAggregationMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation: TitleAggregationResponse; + $unknown?: never; + } + + export interface $UnknownMember { + accountAggregation?: never; + amiAggregation?: never; + awsEcrContainerAggregation?: never; + ec2InstanceAggregation?: never; + findingTypeAggregation?: never; + imageLayerAggregation?: never; + packageAggregation?: never; + repositoryAggregation?: never; + titleAggregation?: never; + $unknown: [string, any]; + } + + export interface Visitor { + accountAggregation: (value: AccountAggregationResponse) => T; + amiAggregation: (value: AmiAggregationResponse) => T; + awsEcrContainerAggregation: (value: AwsEcrContainerAggregationResponse) => T; + ec2InstanceAggregation: (value: Ec2InstanceAggregationResponse) => T; + findingTypeAggregation: (value: FindingTypeAggregationResponse) => T; + imageLayerAggregation: (value: ImageLayerAggregationResponse) => T; + packageAggregation: (value: PackageAggregationResponse) => T; + repositoryAggregation: (value: RepositoryAggregationResponse) => T; + titleAggregation: (value: TitleAggregationResponse) => T; + _: (name: string, value: any) => T; + } + + export const visit = (value: AggregationResponse, visitor: Visitor): T => { + if (value.accountAggregation !== undefined) return visitor.accountAggregation(value.accountAggregation); + if (value.amiAggregation !== undefined) return visitor.amiAggregation(value.amiAggregation); + if (value.awsEcrContainerAggregation !== undefined) + return visitor.awsEcrContainerAggregation(value.awsEcrContainerAggregation); + if (value.ec2InstanceAggregation !== undefined) return visitor.ec2InstanceAggregation(value.ec2InstanceAggregation); + if (value.findingTypeAggregation !== undefined) return visitor.findingTypeAggregation(value.findingTypeAggregation); + if (value.imageLayerAggregation !== undefined) return visitor.imageLayerAggregation(value.imageLayerAggregation); + if (value.packageAggregation !== undefined) return visitor.packageAggregation(value.packageAggregation); + if (value.repositoryAggregation !== undefined) return visitor.repositoryAggregation(value.repositoryAggregation); + if (value.titleAggregation !== undefined) return visitor.titleAggregation(value.titleAggregation); + return visitor._(value.$unknown[0], value.$unknown[1]); + }; + + /** + * @internal + */ + export const filterSensitiveLog = (obj: AggregationResponse): any => { + if (obj.accountAggregation !== undefined) + return { accountAggregation: AccountAggregationResponse.filterSensitiveLog(obj.accountAggregation) }; + if (obj.amiAggregation !== undefined) + return { amiAggregation: AmiAggregationResponse.filterSensitiveLog(obj.amiAggregation) }; + if (obj.awsEcrContainerAggregation !== undefined) + return { + awsEcrContainerAggregation: AwsEcrContainerAggregationResponse.filterSensitiveLog( + obj.awsEcrContainerAggregation + ), + }; + if (obj.ec2InstanceAggregation !== undefined) + return { ec2InstanceAggregation: Ec2InstanceAggregationResponse.filterSensitiveLog(obj.ec2InstanceAggregation) }; + if (obj.findingTypeAggregation !== undefined) + return { findingTypeAggregation: FindingTypeAggregationResponse.filterSensitiveLog(obj.findingTypeAggregation) }; + if (obj.imageLayerAggregation !== undefined) + 
return { imageLayerAggregation: ImageLayerAggregationResponse.filterSensitiveLog(obj.imageLayerAggregation) }; + if (obj.packageAggregation !== undefined) + return { packageAggregation: PackageAggregationResponse.filterSensitiveLog(obj.packageAggregation) }; + if (obj.repositoryAggregation !== undefined) + return { repositoryAggregation: RepositoryAggregationResponse.filterSensitiveLog(obj.repositoryAggregation) }; + if (obj.titleAggregation !== undefined) + return { titleAggregation: TitleAggregationResponse.filterSensitiveLog(obj.titleAggregation) }; + if (obj.$unknown !== undefined) return { [obj.$unknown[0]]: "UNKNOWN" }; + }; +} + +export enum AggregationType { + ACCOUNT = "ACCOUNT", + AMI = "AMI", + AWS_EC2_INSTANCE = "AWS_EC2_INSTANCE", + AWS_ECR_CONTAINER = "AWS_ECR_CONTAINER", + FINDING_TYPE = "FINDING_TYPE", + IMAGE_LAYER = "IMAGE_LAYER", + PACKAGE = "PACKAGE", + REPOSITORY = "REPOSITORY", + TITLE = "TITLE", +} + +export interface AssociateMemberRequest { + /** + *

                                  The Amazon Web Services account ID of the member account to be associated.

                                  + */ + accountId: string | undefined; +} + +export namespace AssociateMemberRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateMemberRequest): any => ({ + ...obj, + }); +} + +export interface AssociateMemberResponse { + /** + *

                                  The Amazon Web Services account ID of the successfully associated member account.

                                  + */ + accountId: string | undefined; +} + +export namespace AssociateMemberResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateMemberResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  The request has failed due to an internal failure of the Amazon Inspector service.

                                  + */ +export interface InternalServerException extends __SmithyException, $MetadataBearer { + name: "InternalServerException"; + $fault: "server"; + $retryable: {}; + message: string | undefined; + /** + *

                                  The number of seconds to wait before retrying the request.

                                  + */ + retryAfterSeconds?: number; +} + +export namespace InternalServerException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InternalServerException): any => ({ + ...obj, + }); +} + +/** + *

                                  The limit on the number of requests per second was exceeded.

                                  + */ +export interface ThrottlingException extends __SmithyException, $MetadataBearer { + name: "ThrottlingException"; + $fault: "client"; + message: string | undefined; +} + +export namespace ThrottlingException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ThrottlingException): any => ({ + ...obj, + }); +} + +/** + *

                                  An object that describes a validation exception.

                                  + */ +export interface ValidationExceptionField { + /** + *

                                  The name of the validation exception.

                                  + */ + name: string | undefined; + + /** + *

                                  The validation exception message.

                                  + */ + message: string | undefined; +} + +export namespace ValidationExceptionField { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationExceptionField): any => ({ + ...obj, + }); +} + +export enum ValidationExceptionReason { + CANNOT_PARSE = "CANNOT_PARSE", + FIELD_VALIDATION_FAILED = "FIELD_VALIDATION_FAILED", + OTHER = "OTHER", +} + +/** + *

                                  The request has failed validation due to missing required fields or having invalid + * inputs.

                                  + */ +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; + $fault: "client"; + message: string | undefined; + /** + *

                                  The reason for the validation failure.

                                  + */ + reason: ValidationExceptionReason | string | undefined; + + /** + *

                                  The fields that failed validation.

                                  + */ + fields?: ValidationExceptionField[]; +} + +export namespace ValidationException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); +} + +/** + *

                                  Represents which scan types are automatically enabled for new members of your Amazon Inspector organization.

                                  + */ +export interface AutoEnable { + /** + *

                                  Represents whether Amazon EC2 scans are automatically enabled for new members of your Amazon Inspector + * organization.

                                  + */ + ec2: boolean | undefined; + + /** + *

                                  Represents whether Amazon ECR scans are automatically enabled for new members of your Amazon Inspector + * organization.

                                  + */ + ecr: boolean | undefined; +} + +export namespace AutoEnable { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AutoEnable): any => ({ + ...obj, + }); +} + +/** + *

                                  Details of the Amazon EC2 instance involved in a finding.

                                  + */ +export interface AwsEc2InstanceDetails { + /** + *

                                  The type of the Amazon EC2 instance.

                                  + */ + type?: string; + + /** + *

                                  The image ID of the Amazon EC2 instance.

                                  + */ + imageId?: string; + + /** + *

                                  The IPv4 addresses of the Amazon EC2 instance.

                                  + */ + ipV4Addresses?: string[]; + + /** + *

                                  The IPv6 addresses of the Amazon EC2 instance.

                                  + */ + ipV6Addresses?: string[]; + + /** + *

                                  The name of the key pair used to launch the Amazon EC2 instance.

                                  + */ + keyName?: string; + + /** + *

                                  The IAM instance profile ARN of the Amazon EC2 instance.

                                  + */ + iamInstanceProfileArn?: string; + + /** + *

                                  The VPC ID of the Amazon EC2 instance.

                                  + */ + vpcId?: string; + + /** + *

                                  The subnet ID of the Amazon EC2 instance.

                                  + */ + subnetId?: string; + + /** + *

                                  The date and time the Amazon EC2 instance was launched at.

                                  + */ + launchedAt?: Date; + + /** + *

                                  The platform of the Amazon EC2 instance.

                                  + */ + platform?: string; +} + +export namespace AwsEc2InstanceDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AwsEc2InstanceDetails): any => ({ + ...obj, + }); +} + +/** + *

                                  The image details of the Amazon ECR container image.

                                  + */ +export interface AwsEcrContainerImageDetails { + /** + *

                                  The name of the repository the Amazon ECR container image resides in.

                                  + */ + repositoryName: string | undefined; + + /** + *

                                  The image tags attached to the Amazon ECR container image.

                                  + */ + imageTags?: string[]; + + /** + *

                                  The date and time the Amazon ECR container image was pushed.

                                  + */ + pushedAt?: Date; + + /** + *

                                  The image author of the Amazon ECR container image.

                                  + */ + author?: string; + + /** + *

                                  The architecture of the Amazon ECR container image.

                                  + */ + architecture?: string; + + /** + *

                                  The image hash of the Amazon ECR container image.

                                  + */ + imageHash: string | undefined; + + /** + *

                                  The registry the Amazon ECR container image belongs to.

                                  + */ + registry: string | undefined; + + /** + *

                                  The platform of the Amazon ECR container image.

                                  + */ + platform?: string; +} + +export namespace AwsEcrContainerImageDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AwsEcrContainerImageDetails): any => ({ + ...obj, + }); +} + +/** + *

                                  One or more tags submitted as part of the request is not valid.

                                  + */ +export interface BadRequestException extends __SmithyException, $MetadataBearer { + name: "BadRequestException"; + $fault: "client"; + message: string | undefined; +} + +export namespace BadRequestException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BadRequestException): any => ({ + ...obj, + }); +} + +export interface BatchGetAccountStatusRequest { + /** + *

                                  The 12-digit Amazon Web Services account IDs of the accounts to retrieve Amazon Inspector status for.

                                  + */ + accountIds?: string[]; +} + +export namespace BatchGetAccountStatusRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchGetAccountStatusRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  An object with details on why an account failed to enable Amazon Inspector.

                                  + */ +export interface FailedAccount { + /** + *

                                  The Amazon Web Services account ID.

                                  + */ + accountId: string | undefined; + + /** + *

                                  The status of Amazon Inspector for the account.

                                  + */ + status?: Status | string; + + /** + *

                                  An object detailing which resources Amazon Inspector is enabled to scan for the account.

                                  + */ + resourceStatus?: ResourceStatus; + + /** + *

                                  The error code explaining why the account failed to enable Amazon Inspector.

                                  + */ + errorCode: ErrorCode | string | undefined; + + /** + *

                                  The error message received when the account failed to enable Amazon Inspector.

                                  + */ + errorMessage: string | undefined; +} + +export namespace FailedAccount { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FailedAccount): any => ({ + ...obj, + }); +} + +export interface BatchGetAccountStatusResponse { + /** + *

                                  An array of objects that provide details on the status of Amazon Inspector for each of the requested accounts.

                                  + */ + accounts: AccountState[] | undefined; + + /** + *

                                  An array of objects detailing any accounts that failed to enable Amazon Inspector and why.

                                  + */ + failedAccounts?: FailedAccount[]; +} + +export namespace BatchGetAccountStatusResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchGetAccountStatusResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  The operation tried to access an invalid resource. Make sure the resource is specified correctly.

                                  + */ +export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { + name: "ResourceNotFoundException"; + $fault: "client"; + message: string | undefined; +} + +export namespace ResourceNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ + ...obj, + }); +} + +export interface BatchGetFreeTrialInfoRequest { + /** + *

                                  The account IDs to get free trial status for.

                                  + */ + accountIds: string[] | undefined; +} + +export namespace BatchGetFreeTrialInfoRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchGetFreeTrialInfoRequest): any => ({ + ...obj, + }); +} + +export enum FreeTrialStatus { + ACTIVE = "ACTIVE", + INACTIVE = "INACTIVE", +} + +export enum FreeTrialType { + EC2 = "EC2", + ECR = "ECR", +} + +/** + *

                                  An object that contains information about the Amazon Inspector free trial for an account.

                                  + */ +export interface FreeTrialInfo { + /** + *

                                  The type of scan covered by the Amazon Inspector free trail.

                                  + */ + type: FreeTrialType | string | undefined; + + /** + *

                                  The date and time that the Amazon Inspector free trail started for a given account.

                                  + */ + start: Date | undefined; + + /** + *

                                  The date and time that the Amazon Inspector free trail ends for a given account.

                                  + */ + end: Date | undefined; + + /** + *

                                  The order to sort results by.

                                  + */ + status: FreeTrialStatus | string | undefined; +} + +export namespace FreeTrialInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FreeTrialInfo): any => ({ + ...obj, + }); +} + +/** + *

                                  Information about the Amazon Inspector free trial for an account.

                                  + */ +export interface FreeTrialAccountInfo { + /** + *

                                  The account associated with the Amazon Inspector free trial information.

                                  + */ + accountId: string | undefined; + + /** + *

                                  Contains information about the Amazon Inspector free trial for an account.

                                  + */ + freeTrialInfo: FreeTrialInfo[] | undefined; +} + +export namespace FreeTrialAccountInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FreeTrialAccountInfo): any => ({ + ...obj, + }); +} + +export enum FreeTrialInfoErrorCode { + ACCESS_DENIED = "ACCESS_DENIED", + INTERNAL_ERROR = "INTERNAL_ERROR", +} + +/** + *

                                  Information about an error received while accessing free trail data for an account.

                                  + */ +export interface FreeTrialInfoError { + /** + *

                                  The account associated with the Amazon Inspector free trial information.

                                  + */ + accountId: string | undefined; + + /** + *

                                  The error code.

                                  + */ + code: FreeTrialInfoErrorCode | string | undefined; + + /** + *

                                  The error message returned.

                                  + */ + message: string | undefined; +} + +export namespace FreeTrialInfoError { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FreeTrialInfoError): any => ({ + ...obj, + }); +} + +export interface BatchGetFreeTrialInfoResponse { + /** + *

                                  An array of objects that provide Amazon Inspector free trial details for each of the requested accounts. + *

                                  + */ + accounts: FreeTrialAccountInfo[] | undefined; + + /** + *

                                  An array of objects detailing any accounts that free trial data could not be returned for.

                                  + */ + failedAccounts: FreeTrialInfoError[] | undefined; +} + +export namespace BatchGetFreeTrialInfoResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchGetFreeTrialInfoResponse): any => ({ + ...obj, + }); +} + +export interface CancelFindingsReportRequest { + /** + *

                                  The ID of the report to be canceled.

                                  + */ + reportId: string | undefined; +} + +export namespace CancelFindingsReportRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CancelFindingsReportRequest): any => ({ + ...obj, + }); +} + +export interface CancelFindingsReportResponse { + /** + *

                                  The ID of the canceled report.

                                  + */ + reportId: string | undefined; +} + +export namespace CancelFindingsReportResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CancelFindingsReportResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  A conflict occurred.

                                  + */ +export interface ConflictException extends __SmithyException, $MetadataBearer { + name: "ConflictException"; + $fault: "client"; + message: string | undefined; + /** + *

                                  The ID of the conflicting resource.

                                  + */ + resourceId: string | undefined; + + /** + *

                                  The type of the conflicting resource.

                                  + */ + resourceType: string | undefined; +} + +export namespace ConflictException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConflictException): any => ({ + ...obj, + }); +} + +export enum GroupKey { + ACCOUNT_ID = "ACCOUNT_ID", + ECR_REPOSITORY_NAME = "ECR_REPOSITORY_NAME", + RESOURCE_TYPE = "RESOURCE_TYPE", + SCAN_STATUS_CODE = "SCAN_STATUS_CODE", + SCAN_STATUS_REASON = "SCAN_STATUS_REASON", +} + +/** + *

                                  a structure that contains information on the count of resources within a group.

                                  + */ +export interface Counts { + /** + *

                                  The number of resources.

                                  + */ + count?: number; + + /** + *

                                  The key associated with this group

                                  + */ + groupKey?: GroupKey | string; +} + +export namespace Counts { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Counts): any => ({ + ...obj, + }); +} + +export enum CoverageStringComparison { + EQUALS = "EQUALS", + NOT_EQUALS = "NOT_EQUALS", +} + +/** + *

                                  Contains details of a coverage string filter.

                                  + */ +export interface CoverageStringFilter { + /** + *

                                  The operator to compare strings on.

                                  + */ + comparison: CoverageStringComparison | string | undefined; + + /** + *

                                  The value to compare strings on.

                                  + */ + value: string | undefined; +} + +export namespace CoverageStringFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CoverageStringFilter): any => ({ + ...obj, + }); +} + +export enum CoverageMapComparison { + EQUALS = "EQUALS", +} + +/** + *

                                  Contains details of a coverage map filter.

                                  + */ +export interface CoverageMapFilter { + /** + *

                                  The operator to compare coverage on.

                                  + */ + comparison: CoverageMapComparison | string | undefined; + + /** + *

                                  The tag key associated with the coverage map filter.

                                  + */ + key: string | undefined; + + /** + *

                                  The tag value associated with the coverage map filter.

                                  + */ + value?: string; +} + +export namespace CoverageMapFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CoverageMapFilter): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that identifies filter criteria for GetCoverageStatistics.

                                  + */ +export interface CoverageFilterCriteria { + /** + *

                                  The scan status code to filter on.

                                  + */ + scanStatusCode?: CoverageStringFilter[]; + + /** + *

                                  The scan status reason to filter on.

                                  + */ + scanStatusReason?: CoverageStringFilter[]; + + /** + *

                                  An array of Amazon Web Services account IDs to return coverage statistics for.

                                  + */ + accountId?: CoverageStringFilter[]; + + /** + *

                                  An array of Amazon Web Services resource IDs to return coverage statistics for.

                                  + */ + resourceId?: CoverageStringFilter[]; + + /** + *

                                  An array of Amazon Web Services resource types to return coverage statistics for.

                                  + */ + resourceType?: CoverageStringFilter[]; + + /** + *

                                  An array of Amazon Inspector scan types to return coverage statistics for.

                                  + */ + scanType?: CoverageStringFilter[]; + + /** + *

                                  The Amazon ECR repository name to filter on.

                                  + */ + ecrRepositoryName?: CoverageStringFilter[]; + + /** + *

                                  The Amazon ECR image tags to filter on.

                                  + */ + ecrImageTags?: CoverageStringFilter[]; + + /** + *

                                  The Amazon EC2 instance tags to filter on.

                                  + */ + ec2InstanceTags?: CoverageMapFilter[]; +} + +export namespace CoverageFilterCriteria { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CoverageFilterCriteria): any => ({ + ...obj, + }); +} + +export enum CoverageResourceType { + AWS_EC2_INSTANCE = "AWS_EC2_INSTANCE", + AWS_ECR_CONTAINER_IMAGE = "AWS_ECR_CONTAINER_IMAGE", + AWS_ECR_REPOSITORY = "AWS_ECR_REPOSITORY", +} + +export enum Ec2Platform { + LINUX = "LINUX", + UNKNOWN = "UNKNOWN", + WINDOWS = "WINDOWS", +} + +/** + *

                                  Meta data details of an Amazon EC2 instance.

                                  + */ +export interface Ec2Metadata { + /** + *

                                  The tags attached to the instance.

                                  + */ + tags?: { [key: string]: string }; + + /** + *

                                  The ID of the Amazon Machine Image (AMI) used to launch the instance.

                                  + */ + amiId?: string; + + /** + *

                                  The platform of the instance.

                                  + */ + platform?: Ec2Platform | string; +} + +export namespace Ec2Metadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Ec2Metadata): any => ({ + ...obj, + }); +} + +/** + *

                                  Information on the Amazon ECR image metadata associated with a finding.

                                  + */ +export interface EcrContainerImageMetadata { + /** + *

                                  Tags associated with the Amazon ECR image metadata.

                                  + */ + tags?: string[]; +} + +export namespace EcrContainerImageMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EcrContainerImageMetadata): any => ({ + ...obj, + }); +} + +export enum EcrScanFrequency { + CONTINUOUS_SCAN = "CONTINUOUS_SCAN", + MANUAL = "MANUAL", + SCAN_ON_PUSH = "SCAN_ON_PUSH", +} + +/** + *

                                  Information on the Amazon ECR repository metadata associated with a finding.

                                  + */ +export interface EcrRepositoryMetadata { + /** + *

                                  The name of the Amazon ECR repository.

                                  + */ + name?: string; + + /** + *

                                  The frequency of scans.

                                  + */ + scanFrequency?: EcrScanFrequency | string; +} + +export namespace EcrRepositoryMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EcrRepositoryMetadata): any => ({ + ...obj, + }); +} + +/** + *

                                  An object that contains details about the metadata for an Amazon ECR resource.

                                  + */ +export interface ResourceScanMetadata { + /** + *

                                  An object that contains details about the repository an Amazon ECR image resides in.

                                  + */ + ecrRepository?: EcrRepositoryMetadata; + + /** + *

                                  An object that contains details about the container metadata for an Amazon ECR image.

                                  + */ + ecrImage?: EcrContainerImageMetadata; + + /** + *

                                  An object that contains metadata details for an Amazon EC2 instance.

                                  + */ + ec2?: Ec2Metadata; +} + +export namespace ResourceScanMetadata { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceScanMetadata): any => ({ + ...obj, + }); +} + +export enum ScanStatusReason { + ACCESS_DENIED = "ACCESS_DENIED", + EC2_INSTANCE_STOPPED = "EC2_INSTANCE_STOPPED", + IMAGE_SIZE_EXCEEDED = "IMAGE_SIZE_EXCEEDED", + INTERNAL_ERROR = "INTERNAL_ERROR", + NO_RESOURCES_FOUND = "NO_RESOURCES_FOUND", + PENDING_INITIAL_SCAN = "PENDING_INITIAL_SCAN", + RESOURCE_TERMINATED = "RESOURCE_TERMINATED", + SCAN_ELIGIBILITY_EXPIRED = "SCAN_ELIGIBILITY_EXPIRED", + SCAN_FREQUENCY_MANUAL = "SCAN_FREQUENCY_MANUAL", + SCAN_FREQUENCY_SCAN_ON_PUSH = "SCAN_FREQUENCY_SCAN_ON_PUSH", + SUCCESSFUL = "SUCCESSFUL", + UNMANAGED_EC2_INSTANCE = "UNMANAGED_EC2_INSTANCE", + UNSUPPORTED_OS = "UNSUPPORTED_OS", +} + +export enum ScanStatusCode { + ACTIVE = "ACTIVE", + INACTIVE = "INACTIVE", +} + +/** + *

                                  The status of the scan.

                                  + */ +export interface ScanStatus { + /** + *

                                  The status code of the scan.

                                  + */ + statusCode: ScanStatusCode | string | undefined; + + /** + *

                                  The reason for the scan.

                                  + */ + reason: ScanStatusReason | string | undefined; +} + +export namespace ScanStatus { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ScanStatus): any => ({ + ...obj, + }); +} + +export enum ScanType { + NETWORK = "NETWORK", + PACKAGE = "PACKAGE", +} + +/** + *

                                  An object that contains details about a resource covered by Amazon Inspector.

                                  + */ +export interface CoveredResource { + /** + *

                                  The type of the covered resource.

                                  + */ + resourceType: CoverageResourceType | string | undefined; + + /** + *

                                  The ID of the covered resource.

                                  + */ + resourceId: string | undefined; + + /** + *

                                  The Amazon Web Services account ID of the covered resource.

                                  + */ + accountId: string | undefined; + + /** + *

                                  The Amazon Inspector scan type covering the resource.

                                  + */ + scanType: ScanType | string | undefined; + + /** + *

                                  The status of the scan covering the resource.

                                  + */ + scanStatus?: ScanStatus; + + /** + *

                                  An object that contains details about the metadata.

                                  + */ + resourceMetadata?: ResourceScanMetadata; +} + +export namespace CoveredResource { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CoveredResource): any => ({ + ...obj, + }); +} + +export enum FilterAction { + NONE = "NONE", + SUPPRESS = "SUPPRESS", +} + +/** + *

                                  Contains details on the time range used to filter findings.

                                  + */ +export interface DateFilter { + /** + *

                                  A timestamp representing the start of the time period filtered on.

                                  + */ + startInclusive?: Date; + + /** + *

                                  A timestamp representing the end of the time period filtered on.

                                  + */ + endInclusive?: Date; +} + +export namespace DateFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DateFilter): any => ({ + ...obj, + }); +} + +/** + *

                                  An object that describes the details of a number filter.

                                  + */ +export interface NumberFilter { + /** + *

                                  The highest number to be included in the filter.

                                  + */ + upperInclusive?: number; + + /** + *

                                  The lowest number to be included in the filter.

                                  + */ + lowerInclusive?: number; +} + +export namespace NumberFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NumberFilter): any => ({ + ...obj, + }); +} + +/** + *

                                  An object that describes the details of a port range filter.

                                  + */ +export interface PortRangeFilter { + /** + *

                                  The port number the port range begins at.

                                  + */ + beginInclusive?: number; + + /** + *

                                  The port number the port range ends at.

                                  + */ + endInclusive?: number; +} + +export namespace PortRangeFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PortRangeFilter): any => ({ + ...obj, + }); +} + +/** + *

                                  Contains information on the details of a package filter.

                                  + */ +export interface PackageFilter { + /** + *

                                  An object that contains details on the name of the package to filter on.

                                  + */ + name?: StringFilter; + + /** + *

                                  The package version to filter on.

                                  + */ + version?: StringFilter; + + /** + *

                                  An object that contains details on the package epoch to filter on.

                                  + */ + epoch?: NumberFilter; + + /** + *

                                  An object that contains details on the package release to filter on.

                                  + */ + release?: StringFilter; + + /** + *

                                  An object that contains details on the package architecture type to filter on.

                                  + */ + architecture?: StringFilter; + + /** + *

                                  An object that contains details on the source layer hash to filter on.

                                  + */ + sourceLayerHash?: StringFilter; +} + +export namespace PackageFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PackageFilter): any => ({ + ...obj, + }); +} + +/** + *

                                  Details on the criteria used to define the filter.

                                  + */ +export interface FilterCriteria { + /** + *

                                  Details on the finding ARNs used to filter findings.

                                  + */ + findingArn?: StringFilter[]; + + /** + *

                                  Details of the Amazon Web Services account IDs used to filter findings.

                                  + */ + awsAccountId?: StringFilter[]; + + /** + *

                                  Details on the finding types used to filter findings.

                                  + */ + findingType?: StringFilter[]; + + /** + *

                                  Details on the severity used to filter findings.

                                  + */ + severity?: StringFilter[]; + + /** + *

                                  Details on the date and time a finding was first seen used to filter findings.

                                  + */ + firstObservedAt?: DateFilter[]; + + /** + *

                                  Details on the date and time a finding was last seen used to filter findings.

                                  + */ + lastObservedAt?: DateFilter[]; + + /** + *

                                  Details on the date and time a finding was last updated at used to filter findings.

                                  + */ + updatedAt?: DateFilter[]; + + /** + *

                                  Details on the finding status types used to filter findings.

                                  + */ + findingStatus?: StringFilter[]; + + /** + *

                                  Details on the finding title used to filter findings.

                                  + */ + title?: StringFilter[]; + + /** + *

                                  The Amazon Inspector score to filter on.

                                  + */ + inspectorScore?: NumberFilter[]; + + /** + *

                                  Details on the resource types used to filter findings.

                                  + */ + resourceType?: StringFilter[]; + + /** + *

                                  Details on the resource IDs used to filter findings.

                                  + */ + resourceId?: StringFilter[]; + + /** + *

                                  Details on the resource tags used to filter findings.

                                  + */ + resourceTags?: MapFilter[]; + + /** + *

                                  Details of the Amazon EC2 instance image IDs used to filter findings.

                                  + */ + ec2InstanceImageId?: StringFilter[]; + + /** + *

                                  Details of the Amazon EC2 instance VPC IDs used to filter findings.

                                  + */ + ec2InstanceVpcId?: StringFilter[]; + + /** + *

                                  Details of the Amazon EC2 instance subnet IDs used to filter findings.

                                  + */ + ec2InstanceSubnetId?: StringFilter[]; + + /** + *

                                  Details on the Amazon ECR image push date and time used to filter findings.

                                  + */ + ecrImagePushedAt?: DateFilter[]; + + /** + *

                                  Details of the Amazon ECR image architecture types used to filter findings.

                                  + */ + ecrImageArchitecture?: StringFilter[]; + + /** + *

                                  Details on the Amazon ECR registry used to filter findings.

                                  + */ + ecrImageRegistry?: StringFilter[]; + + /** + *

                                  Details on the name of the Amazon ECR repository used to filter findings.

                                  + */ + ecrImageRepositoryName?: StringFilter[]; + + /** + *

                                  The tags attached to the Amazon ECR container image.

                                  + */ + ecrImageTags?: StringFilter[]; + + /** + *

                                  Details of the Amazon ECR image hashes used to filter findings.

                                  + */ + ecrImageHash?: StringFilter[]; + + /** + *

                                  Details on the port ranges used to filter findings.

                                  + */ + portRange?: PortRangeFilter[]; + + /** + *

                                  Details on the ingress source addresses used to filter findings.

                                  + */ + networkProtocol?: StringFilter[]; + + /** + *

                                  Details of the component IDs used to filter findings.

                                  + */ + componentId?: StringFilter[]; + + /** + *

                                  Details of the component types used to filter findings.

                                  + */ + componentType?: StringFilter[]; + + /** + *

                                  Details on the vulnerability ID used to filter findings.

                                  + */ + vulnerabilityId?: StringFilter[]; + + /** + *

                                  Details on the vulnerability type used to filter findings.

                                  + */ + vulnerabilitySource?: StringFilter[]; + + /** + *

                                  Details on the vendor severity used to filter findings.

                                  + */ + vendorSeverity?: StringFilter[]; + + /** + *

                                  Details on the vulnerable packages used to filter findings.

                                  + */ + vulnerablePackages?: PackageFilter[]; + + /** + *

                                  Details on the related vulnerabilities used to filter findings.

                                  + */ + relatedVulnerabilities?: StringFilter[]; +} + +export namespace FilterCriteria { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FilterCriteria): any => ({ + ...obj, + }); +} + +export interface CreateFilterRequest { + /** + *

                                  Defines the action that is to be applied to the findings that match the filter.

                                  + */ + action: FilterAction | string | undefined; + + /** + *

                                  A description of the filter.

                                  + */ + description?: string; + + /** + *

                                  Defines the criteria to be used in the filter for querying findings.

                                  + */ + filterCriteria: FilterCriteria | undefined; + + /** + *

                                  The name of the filter. Minimum length of 3. Maximum length of 64. Valid characters + * include alphanumeric characters, dot (.), underscore (_), and dash (-). Spaces are not + * allowed.

                                  + */ + name: string | undefined; + + /** + *

                                  A list of tags for the filter.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace CreateFilterRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateFilterRequest): any => ({ + ...obj, + }); +} + +export interface CreateFilterResponse { + /** + *

                                  The Amazon Resource Number (ARN) of the successfully created filter.

                                  + */ + arn: string | undefined; +} + +export namespace CreateFilterResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateFilterResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  You have exceeded your service quota. To perform the requested action, remove some of + * the relevant resources, or use Service Quotas to request a service quota increase.

                                  + */ +export interface ServiceQuotaExceededException extends __SmithyException, $MetadataBearer { + name: "ServiceQuotaExceededException"; + $fault: "client"; + message: string | undefined; + /** + *

                                  The ID of the resource that exceeds a service quota.

                                  + */ + resourceId: string | undefined; +} + +export namespace ServiceQuotaExceededException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServiceQuotaExceededException): any => ({ + ...obj, + }); +} + +export enum ReportFormat { + CSV = "CSV", + JSON = "JSON", +} + +/** + *

                                  Contains details of the Amazon S3 bucket and KMS key used to export findings.

                                  + */ +export interface Destination { + /** + *

                                  The name of the Amazon S3 bucket to export findings to.

                                  + */ + bucketName: string | undefined; + + /** + *

                                  The prefix of the KMS key used to export findings.

                                  + */ + keyPrefix?: string; + + /** + *

                                  The ARN of the KMS key used to encrypt data when exporting findings.

                                  + */ + kmsKeyArn: string | undefined; +} + +export namespace Destination { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Destination): any => ({ + ...obj, + }); +} + +export interface CreateFindingsReportRequest { + /** + *

                                  The filter criteria to apply to the results of the finding report.

                                  + */ + filterCriteria?: FilterCriteria; + + /** + *

                                  The format to generate the report in.

                                  + */ + reportFormat: ReportFormat | string | undefined; + + /** + *

                                  The Amazon S3 export destination for the report.

                                  + */ + s3Destination: Destination | undefined; +} + +export namespace CreateFindingsReportRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateFindingsReportRequest): any => ({ + ...obj, + }); +} + +export interface CreateFindingsReportResponse { + /** + *

                                  The ID of the report.

                                  + */ + reportId?: string; +} + +export namespace CreateFindingsReportResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateFindingsReportResponse): any => ({ + ...obj, + }); +} + +export enum Currency { + USD = "USD", +} + +/** + *

                                  The CVSS score for a finding.

                                  + */ +export interface CvssScore { + /** + *

                                  The base CVSS score used for the finding.

                                  + */ + baseScore: number | undefined; + + /** + *

                                  The vector string of the CVSS score.

                                  + */ + scoringVector: string | undefined; + + /** + *

                                  The version of CVSS used for the score.

                                  + */ + version: string | undefined; + + /** + *

                                  The source of the CVSS score.

                                  + */ + source: string | undefined; +} + +export namespace CvssScore { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CvssScore): any => ({ + ...obj, + }); +} + +/** + *

                                  Details on adjustments Amazon Inspector made to the CVSS score for a finding.

                                  + */ +export interface CvssScoreAdjustment { + /** + *

                                  The metric used to adjust the CVSS score.

                                  + */ + metric: string | undefined; + + /** + *

                                  The reason the CVSS score has been adjustment.

                                  + */ + reason: string | undefined; +} + +export namespace CvssScoreAdjustment { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CvssScoreAdjustment): any => ({ + ...obj, + }); +} + +/** + *

                                  Information about the CVSS score.

                                  + */ +export interface CvssScoreDetails { + /** + *

                                  The source for the CVSS score.

                                  + */ + scoreSource: string | undefined; + + /** + *

                                  The source of the CVSS data.

                                  + */ + cvssSource?: string; + + /** + *

                                  The CVSS version used in scoring.

                                  + */ + version: string | undefined; + + /** + *

                                  The CVSS score.

                                  + */ + score: number | undefined; + + /** + *

                                  The vector for the CVSS score.

                                  + */ + scoringVector: string | undefined; + + /** + *

                                  An object that contains details about adjustment Amazon Inspector made to the CVSS score.

                                  + */ + adjustments?: CvssScoreAdjustment[]; +} + +export namespace CvssScoreDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CvssScoreDetails): any => ({ + ...obj, + }); +} + +export enum RelationshipStatus { + ACCOUNT_SUSPENDED = "ACCOUNT_SUSPENDED", + CANNOT_CREATE_DETECTOR_IN_ORG_MASTER = "CANNOT_CREATE_DETECTOR_IN_ORG_MASTER", + CREATED = "CREATED", + DELETED = "DELETED", + DISABLED = "DISABLED", + EMAIL_VERIFICATION_FAILED = "EMAIL_VERIFICATION_FAILED", + EMAIL_VERIFICATION_IN_PROGRESS = "EMAIL_VERIFICATION_IN_PROGRESS", + ENABLED = "ENABLED", + INVITED = "INVITED", + REGION_DISABLED = "REGION_DISABLED", + REMOVED = "REMOVED", + RESIGNED = "RESIGNED", +} + +/** + *

                                  Details of the Amazon Inspector delegated administrator for your organization.

                                  + */ +export interface DelegatedAdmin { + /** + *

                                  The Amazon Web Services account ID of the Amazon Inspector delegated administrator for your organization.

                                  + */ + accountId?: string; + + /** + *

                                  The status of the Amazon Inspector delegated administrator.

                                  + */ + relationshipStatus?: RelationshipStatus | string; +} + +export namespace DelegatedAdmin { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DelegatedAdmin): any => ({ + ...obj, + }); +} + +export enum DelegatedAdminStatus { + DISABLE_IN_PROGRESS = "DISABLE_IN_PROGRESS", + ENABLED = "ENABLED", +} + +/** + *

                                  Details of the Amazon Inspector delegated administrator for your organization.

                                  + */ +export interface DelegatedAdminAccount { + /** + *

                                  The Amazon Web Services account ID of the Amazon Inspector delegated administrator for your organization.

                                  + */ + accountId?: string; + + /** + *

                                  The status of the Amazon Inspector delegated administrator.

                                  + */ + status?: DelegatedAdminStatus | string; +} + +export namespace DelegatedAdminAccount { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DelegatedAdminAccount): any => ({ + ...obj, + }); +} + +export interface DeleteFilterRequest { + /** + *

                                  The Amazon Resource Number (ARN) of the filter to be deleted.

                                  + */ + arn: string | undefined; +} + +export namespace DeleteFilterRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteFilterRequest): any => ({ + ...obj, + }); +} + +export interface DeleteFilterResponse { + /** + *

                                  The Amazon Resource Number (ARN) of the filter that has been deleted.

                                  + */ + arn: string | undefined; +} + +export namespace DeleteFilterResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteFilterResponse): any => ({ + ...obj, + }); +} + +export interface DescribeOrganizationConfigurationRequest {} + +export namespace DescribeOrganizationConfigurationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeOrganizationConfigurationRequest): any => ({ + ...obj, + }); +} + +export interface DescribeOrganizationConfigurationResponse { + /** + *

                                  The scan types are automatically enabled for new members of your organization.

                                  + */ + autoEnable?: AutoEnable; + + /** + *

                                  Represents whether your organization has reached the maximum Amazon Web Services account limit for Amazon Inspector.

                                  + */ + maxAccountLimitReached?: boolean; +} + +export namespace DescribeOrganizationConfigurationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeOrganizationConfigurationResponse): any => ({ + ...obj, + }); +} + +export enum ResourceScanType { + EC2 = "EC2", + ECR = "ECR", +} + +export interface DisableRequest { + /** + *

                                  An array of account IDs you want to disable Amazon Inspector scans for.

                                  + */ + accountIds?: string[]; + + /** + *

                                  The resource scan types you want to disable.

                                  + */ + resourceTypes?: (ResourceScanType | string)[]; +} + +export namespace DisableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisableRequest): any => ({ + ...obj, + }); +} + +export interface DisableResponse { + /** + *

                                  Information on the accounts that have had Amazon Inspector scans successfully disabled. Details are provided for each account.

                                  + */ + accounts: Account[] | undefined; + + /** + *

                                  Information on any accounts for which Amazon Inspector scans could not be disabled. Details are provided for each account.

                                  + */ + failedAccounts?: FailedAccount[]; +} + +export namespace DisableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisableResponse): any => ({ + ...obj, + }); +} + +export interface DisableDelegatedAdminAccountRequest { + /** + *

                                  The Amazon Web Services account ID of the current Amazon Inspector delegated administrator.

                                  + */ + delegatedAdminAccountId: string | undefined; +} + +export namespace DisableDelegatedAdminAccountRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisableDelegatedAdminAccountRequest): any => ({ + ...obj, + }); +} + +export interface DisableDelegatedAdminAccountResponse { + /** + *

                                  The Amazon Web Services account ID of the successfully disabled delegated administrator.

                                  + */ + delegatedAdminAccountId: string | undefined; +} + +export namespace DisableDelegatedAdminAccountResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisableDelegatedAdminAccountResponse): any => ({ + ...obj, + }); +} + +export interface DisassociateMemberRequest { + /** + *

                                  The Amazon Web Services account ID of the member account to disassociate.

                                  + */ + accountId: string | undefined; +} + +export namespace DisassociateMemberRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateMemberRequest): any => ({ + ...obj, + }); +} + +export interface DisassociateMemberResponse { + /** + *

                                  The Amazon Web Services account ID of the successfully disassociated member.

                                  + */ + accountId: string | undefined; +} + +export namespace DisassociateMemberResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateMemberResponse): any => ({ + ...obj, + }); +} + +export interface EnableRequest { + /** + *

                                  A list of account IDs you want to enable Amazon Inspector scans for.

                                  + */ + accountIds?: string[]; + + /** + *

                                  The resource scan types you want to enable.

                                  + */ + resourceTypes: (ResourceScanType | string)[] | undefined; + + /** + *

                                  The idempotency token for the request.

                                  + */ + clientToken?: string; +} + +export namespace EnableRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableRequest): any => ({ + ...obj, + }); +} + +export interface EnableResponse { + /** + *

                                  Information on the accounts that have had Amazon Inspector scans successfully enabled. Details are provided for each account.

                                  + */ + accounts: Account[] | undefined; + + /** + *

                                  Information on any accounts for which Amazon Inspector scans could not be enabled. Details are provided for each account.

                                  + */ + failedAccounts?: FailedAccount[]; +} + +export namespace EnableResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableResponse): any => ({ + ...obj, + }); +} + +export interface EnableDelegatedAdminAccountRequest { + /** + *
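
A minimal usage sketch for the Enable request/response shapes above. It assumes the EnableCommand that this patch generates alongside these models (not shown in this excerpt) and that the package root re-exports it; the account IDs and client token are placeholders.

import { EnableCommand, Inspector2Client, ResourceScanType } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function enableScans(): Promise<void> {
  // Enable EC2 and ECR scanning for two member accounts; clientToken makes retries idempotent.
  const response = await client.send(
    new EnableCommand({
      accountIds: ["111122223333", "444455556666"], // placeholder member account IDs
      resourceTypes: [ResourceScanType.EC2, ResourceScanType.ECR],
      clientToken: "example-idempotency-token", // placeholder
    })
  );
  for (const failed of response.failedAccounts ?? []) {
    console.warn("Could not enable scanning:", failed);
  }
}
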

                                  The Amazon Web Services account ID of the Amazon Inspector delegated administrator.

                                  + */ + delegatedAdminAccountId: string | undefined; + + /** + *

                                  The idempotency token for the request.

                                  + */ + clientToken?: string; +} + +export namespace EnableDelegatedAdminAccountRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableDelegatedAdminAccountRequest): any => ({ + ...obj, + }); +} + +export interface EnableDelegatedAdminAccountResponse { + /** + *

                                  The Amazon Web Services account ID of the successfully enabled Amazon Inspector delegated administrator.

                                  + */ + delegatedAdminAccountId: string | undefined; +} + +export namespace EnableDelegatedAdminAccountResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EnableDelegatedAdminAccountResponse): any => ({ + ...obj, + }); +} + +export enum ExternalReportStatus { + CANCELLED = "CANCELLED", + FAILED = "FAILED", + IN_PROGRESS = "IN_PROGRESS", + SUCCEEDED = "SUCCEEDED", +} + +/** + *

                                  Details about a filter.

                                  + */ +export interface Filter { + /** + *

                                  The Amazon Resource Number (ARN) associated with this filter.

                                  + */ + arn: string | undefined; + + /** + *

                                  The Amazon Web Services account ID of the account that created the filter.

                                  + */ + ownerId: string | undefined; + + /** + *

                                  The name of the filter.

                                  + */ + name: string | undefined; + + /** + *

                                  Details on the filter criteria associated with this filter.

                                  + */ + criteria: FilterCriteria | undefined; + + /** + *

                                  The action that is to be applied to the findings that match the filter.

                                  + */ + action: FilterAction | string | undefined; + + /** + *

                                  The date and time this filter was created.

                                  + */ + createdAt: Date | undefined; + + /** + *

                                  The date and time the filter was last updated.

                                  + */ + updatedAt: Date | undefined; + + /** + *

                                  A description of the filter.

                                  + */ + description?: string; + + /** + *

                                  The reason for the filter.

                                  + */ + reason?: string; + + /** + *

                                  The tags attached to the filter.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace Filter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Filter): any => ({ + ...obj, + }); +} + +/** + *
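
An illustrative Filter value matching the interface above. The ARN, dates, and action are placeholders; FilterCriteria and FilterAction are defined earlier in this file, so their contents are elided or assumed here.

import { Filter, FilterCriteria } from "@aws-sdk/client-inspector2";

const exampleFilter: Filter = {
  arn: "arn:aws:inspector2:us-east-1:111122223333:owner/111122223333/filter/EXAMPLE", // placeholder ARN
  ownerId: "111122223333",
  name: "suppress-informational-findings",
  criteria: {} as FilterCriteria, // FilterCriteria's fields are defined earlier in this file and elided here
  action: "SUPPRESS", // assumed FilterAction member; FilterAction is defined earlier in this file
  createdAt: new Date("2021-11-30T00:00:00Z"),
  updatedAt: new Date("2021-11-30T00:00:00Z"),
  description: "Suppresses informational findings",
  tags: { team: "security" },
};

// The generated namespace helper shown above is internal, but illustrates the logging pattern:
console.log(Filter.filterSensitiveLog(exampleFilter));
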

                                  Information about the Amazon Inspector score given to a finding.

                                  + */ +export interface InspectorScoreDetails { + /** + *

                                  An object that contains details about the CVSS score given to a finding.

                                  + */ + adjustedCvss?: CvssScoreDetails; +} + +export namespace InspectorScoreDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InspectorScoreDetails): any => ({ + ...obj, + }); +} + +/** + *

                                  Details about the step associated with a finding.

                                  + */ +export interface Step { + /** + *

                                  The component ID.

                                  + */ + componentId: string | undefined; + + /** + *

                                  The component type.

                                  + */ + componentType: string | undefined; +} + +export namespace Step { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Step): any => ({ + ...obj, + }); +} + +/** + *

                                  Information on the network path associated with a finding.

                                  + */ +export interface NetworkPath { + /** + *

                                  The details on the steps in the network path.

                                  + */ + steps?: Step[]; +} + +export namespace NetworkPath { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NetworkPath): any => ({ + ...obj, + }); +} + +/** + *

                                  Details about the port range associated with a finding.

                                  + */ +export interface PortRange { + /** + *

                                  The beginning port in a port range.

                                  + */ + begin: number | undefined; + + /** + *

                                  The ending port in a port range.

                                  + */ + end: number | undefined; +} + +export namespace PortRange { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PortRange): any => ({ + ...obj, + }); +} + +export enum NetworkProtocol { + TCP = "TCP", + UDP = "UDP", +} + +/** + *

                                  Contains the details of a network reachability finding.

                                  + */ +export interface NetworkReachabilityDetails { + /** + *

                                  An object that contains details about the open port range associated with a finding.

                                  + */ + openPortRange: PortRange | undefined; + + /** + *

                                  The protocol associated with a finding.

                                  + */ + protocol: NetworkProtocol | string | undefined; + + /** + *

                                  An object that contains details about a network path associated with a finding.

                                  + */ + networkPath: NetworkPath | undefined; +} + +export namespace NetworkReachabilityDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NetworkReachabilityDetails): any => ({ + ...obj, + }); +} + +export enum PackageManager { + BUNDLER = "BUNDLER", + CARGO = "CARGO", + COMPOSER = "COMPOSER", + GOBINARY = "GOBINARY", + GOMOD = "GOMOD", + JAR = "JAR", + NPM = "NPM", + NUGET = "NUGET", + OS = "OS", + PIPENV = "PIPENV", + POETRY = "POETRY", + YARN = "YARN", +} + +/** + *

                                  Information on the vulnerable package identified by a finding.

                                  + */ +export interface VulnerablePackage { + /** + *

                                  The name of the vulnerable package.

                                  + */ + name: string | undefined; + + /** + *

                                  The version of the vulnerable package.

                                  + */ + version: string | undefined; + + /** + *

                                  The source layer hash of the vulnerable package.

                                  + */ + sourceLayerHash?: string; + + /** + *

                                  The epoch of the vulnerable package.

                                  + */ + epoch?: number; + + /** + *

                                  The release of the vulnerable package.

                                  + */ + release?: string; + + /** + *

                                  The architecture of the vulnerable package.

                                  + */ + arch?: string; + + /** + *

                                  The package manager of the vulnerable package.

                                  + */ + packageManager?: PackageManager | string; + + /** + *

                                  The file path of the vulnerable package.

                                  + */ + filePath?: string; + + /** + *

                                  The version of the package that contains the vulnerability fix.

                                  + */ + fixedInVersion?: string; +} + +export namespace VulnerablePackage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: VulnerablePackage): any => ({ + ...obj, + }); +} + +/** + *

                                  Information about a package vulnerability finding.

                                  + */ +export interface PackageVulnerabilityDetails { + /** + *

                                  The ID given to this vulnerability.

                                  + */ + vulnerabilityId: string | undefined; + + /** + *

                                  The packages impacted by this vulnerability.

                                  + */ + vulnerablePackages: VulnerablePackage[] | undefined; + + /** + *

                                  The source of the vulnerability information.

                                  + */ + source: string | undefined; + + /** + *

                                  An object that contains details about the CVSS score of a finding.

                                  + */ + cvss?: CvssScore[]; + + /** + *

                                  One or more vulnerabilities related to the one identified in this finding.

                                  + */ + relatedVulnerabilities?: string[]; + + /** + *

                                  A URL to the source of the vulnerability information.

                                  + */ + sourceUrl?: string; + + /** + *

                                  The severity the vendor has given to this vulnerability type.

                                  + */ + vendorSeverity?: string; + + /** + *

                                  The date and time that this vulnerability was first added to the vendor's database.

                                  + */ + vendorCreatedAt?: Date; + + /** + *

                                  The date and time the vendor last updated this vulnerability in their database.

                                  + */ + vendorUpdatedAt?: Date; + + /** + *

                                  One or more URLs that contain details about this vulnerability type.

                                  + */ + referenceUrls?: string[]; +} + +export namespace PackageVulnerabilityDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PackageVulnerabilityDetails): any => ({ + ...obj, + }); +} + +/** + *

                                  Details about the recommended course of action to remediate the finding.

                                  + */ +export interface Recommendation { + /** + *

                                  The recommended course of action to remediate the finding.

                                  + */ + text?: string; + + /** + *

                                  The URL of the CVE remediation recommendations.

                                  + */ + Url?: string; +} + +export namespace Recommendation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Recommendation): any => ({ + ...obj, + }); +} + +/** + *

                                  Information on how to remediate a finding.

                                  + */ +export interface Remediation { + /** + *

                                  An object that contains information about the recommended course of action to remediate the finding.

                                  + */ + recommendation?: Recommendation; +} + +export namespace Remediation { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Remediation): any => ({ + ...obj, + }); +} + +/** + *

                                  Contains details about the resource involved in the finding.

                                  + */ +export interface ResourceDetails { + /** + *

                                  An object that contains details about the Amazon EC2 instance involved in the finding.

                                  + */ + awsEc2Instance?: AwsEc2InstanceDetails; + + /** + *

                                  An object that contains details about the Amazon ECR container image involved in the finding.

                                  + */ + awsEcrContainerImage?: AwsEcrContainerImageDetails; +} + +export namespace ResourceDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceDetails): any => ({ + ...obj, + }); +} + +export enum ResourceType { + AWS_EC2_INSTANCE = "AWS_EC2_INSTANCE", + AWS_ECR_CONTAINER_IMAGE = "AWS_ECR_CONTAINER_IMAGE", + AWS_ECR_REPOSITORY = "AWS_ECR_REPOSITORY", +} + +/** + *

                                  Details about the resource involved in a finding.

                                  + */ +export interface Resource { + /** + *

                                  The type of resource.

                                  + */ + type: ResourceType | string | undefined; + + /** + *

                                  The ID of the resource.

                                  + */ + id: string | undefined; + + /** + *

                                  The partition of the resource.

                                  + */ + partition?: string; + + /** + *

                                  The Amazon Web Services Region the impacted resource is located in.

                                  + */ + region?: string; + + /** + *

                                  The tags attached to the resource.

                                  + */ + tags?: { [key: string]: string }; + + /** + *

                                  An object that contains details about the resource involved in a finding.

                                  + */ + details?: ResourceDetails; +} + +export namespace Resource { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Resource): any => ({ + ...obj, + }); +} + +export enum Severity { + CRITICAL = "CRITICAL", + HIGH = "HIGH", + INFORMATIONAL = "INFORMATIONAL", + LOW = "LOW", + MEDIUM = "MEDIUM", + UNTRIAGED = "UNTRIAGED", +} + +export enum FindingStatus { + ACTIVE = "ACTIVE", + CLOSED = "CLOSED", + SUPPRESSED = "SUPPRESSED", +} + +export enum FindingType { + NETWORK_REACHABILITY = "NETWORK_REACHABILITY", + PACKAGE_VULNERABILITY = "PACKAGE_VULNERABILITY", +} + +/** + *

                                  Details about an Amazon Inspector finding.

                                  + */ +export interface Finding { + /** + *

                                  The Amazon Resource Number (ARN) of the finding.

                                  + */ + findingArn: string | undefined; + + /** + *

                                  The Amazon Web Services account ID associated with the finding.

                                  + */ + awsAccountId: string | undefined; + + /** + *

                                  The type of the finding.

                                  + */ + type: FindingType | string | undefined; + + /** + *

                                  The description of the finding.

                                  + */ + description: string | undefined; + + /** + *

                                  The title of the finding.

                                  + */ + title?: string; + + /** + *

                                  An object that contains the details about how to remediate a finding.

                                  + */ + remediation: Remediation | undefined; + + /** + *

                                  The severity of the finding.

                                  + */ + severity: Severity | string | undefined; + + /** + *

                                  The date and time that the finding was first observed.

                                  + */ + firstObservedAt: Date | undefined; + + /** + *

                                  The date and time that the finding was last observed.

                                  + */ + lastObservedAt: Date | undefined; + + /** + *

                                  The date and time the finding was last updated.

                                  + */ + updatedAt?: Date; + + /** + *

                                  The status of the finding.

                                  + */ + status: FindingStatus | string | undefined; + + /** + *

                                  Contains information on the resources involved in a finding.

                                  + */ + resources: Resource[] | undefined; + + /** + *

                                  The Amazon Inspector score given to the finding.

                                  + */ + inspectorScore?: number; + + /** + *

                                  An object that contains details of the Amazon Inspector score.

                                  + */ + inspectorScoreDetails?: InspectorScoreDetails; + + /** + *

                                  An object that contains the details of a network reachability finding.

                                  + */ + networkReachabilityDetails?: NetworkReachabilityDetails; + + /** + *

                                  An object that contains the details of a package vulnerability finding.

                                  + */ + packageVulnerabilityDetails?: PackageVulnerabilityDetails; +} + +export namespace Finding { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Finding): any => ({ + ...obj, + }); +} + +export interface GetDelegatedAdminAccountRequest {} + +export namespace GetDelegatedAdminAccountRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDelegatedAdminAccountRequest): any => ({ + ...obj, + }); +} + +export interface GetDelegatedAdminAccountResponse { + /** + *

                                  Details of the Amazon Inspector delegated administrator for your organization.

                                  + */ + delegatedAdmin?: DelegatedAdmin; +} + +export namespace GetDelegatedAdminAccountResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDelegatedAdminAccountResponse): any => ({ + ...obj, + }); +} + +export interface GetFindingsReportStatusRequest { + /** + *

                                  The ID of the report to retrieve the status of.

                                  + */ + reportId?: string; +} + +export namespace GetFindingsReportStatusRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetFindingsReportStatusRequest): any => ({ + ...obj, + }); +} + +export enum ReportingErrorCode { + INTERNAL_ERROR = "INTERNAL_ERROR", + INVALID_PERMISSIONS = "INVALID_PERMISSIONS", +} + +export interface GetFindingsReportStatusResponse { + /** + *

                                  The ID of the report.

                                  + */ + reportId?: string; + + /** + *

                                  The status of the report.

                                  + */ + status?: ExternalReportStatus | string; + + /** + *

                                  The error code of the report.

                                  + */ + errorCode?: ReportingErrorCode | string; + + /** + *

                                  The error message of the report.

                                  + */ + errorMessage?: string; + + /** + *

                                  The destination of the report.

                                  + */ + destination?: Destination; + + /** + *

                                  The filter criteria associated with the report.

                                  + */ + filterCriteria?: FilterCriteria; +} + +export namespace GetFindingsReportStatusResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetFindingsReportStatusResponse): any => ({ + ...obj, + }); +} + +export interface GetMemberRequest { + /** + *
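
A sketch that inspects the report-status fields above. It assumes a GetFindingsReportStatusCommand is generated alongside these models; only the request/response shapes appear in this excerpt.

import { ExternalReportStatus, GetFindingsReportStatusCommand, Inspector2Client } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function reportSucceeded(reportId: string): Promise<boolean> {
  const status = await client.send(new GetFindingsReportStatusCommand({ reportId }));
  if (status.status === ExternalReportStatus.FAILED) {
    // errorCode is a ReportingErrorCode member (INTERNAL_ERROR or INVALID_PERMISSIONS).
    throw new Error(`Report failed: ${status.errorCode ?? "unknown"} ${status.errorMessage ?? ""}`);
  }
  return status.status === ExternalReportStatus.SUCCEEDED;
}
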

                                  The Amazon Web Services account ID of the member account to retrieve information on.

                                  + */ + accountId: string | undefined; +} + +export namespace GetMemberRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetMemberRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Details on a member account in your organization.

                                  + */ +export interface Member { + /** + *

                                  The Amazon Web Services account ID of the member account.

                                  + */ + accountId?: string; + + /** + *

                                  The status of the member account.

                                  + */ + relationshipStatus?: RelationshipStatus | string; + + /** + *

                                  The Amazon Web Services account ID of the Amazon Inspector delegated administrator for this member account.

                                  + */ + delegatedAdminAccountId?: string; + + /** + *

                                  A timestamp showing when the status of this member was last updated.

                                  + */ + updatedAt?: Date; +} + +export namespace Member { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Member): any => ({ + ...obj, + }); +} + +export interface GetMemberResponse { + /** + *

                                  Details of the retrieved member account.

                                  + */ + member?: Member; +} + +export namespace GetMemberResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetMemberResponse): any => ({ + ...obj, + }); +} + +export enum Service { + EC2 = "EC2", + ECR = "ECR", +} + +export interface ListAccountPermissionsRequest { + /** + *

                                  The service scan type to check permissions for.

                                  + */ + service?: Service | string; + + /** + *

                                  The maximum number of results to return in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListAccountPermissionsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListAccountPermissionsRequest): any => ({ + ...obj, + }); +} + +export enum Operation { + DISABLE_REPOSITORY = "DISABLE_REPOSITORY", + DISABLE_SCANNING = "DISABLE_SCANNING", + ENABLE_REPOSITORY = "ENABLE_REPOSITORY", + ENABLE_SCANNING = "ENABLE_SCANNING", +} + +/** + *

                                  Contains information on the permissions an account has within Amazon Inspector.

                                  + */ +export interface Permission { + /** + *

                                  The service for which the permissions allow an account to perform the given operations.

                                  + */ + service: Service | string | undefined; + + /** + *

                                  The operations that can be performed with the given permissions.

                                  + */ + operation: Operation | string | undefined; +} + +export namespace Permission { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Permission): any => ({ + ...obj, + }); +} + +export interface ListAccountPermissionsResponse { + /** + *

                                  Contains details on the permissions an account has to configure Amazon Inspector.

                                  + */ + permissions: Permission[] | undefined; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListAccountPermissionsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListAccountPermissionsResponse): any => ({ + ...obj, + }); +} + +export interface ListCoverageRequest { + /** + *
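
A sketch of driving the nextToken protocol described above by hand with ListAccountPermissionsCommand (the same command the paginator added later in this patch wraps); the region is a placeholder.

import { Inspector2Client, ListAccountPermissionsCommand, Permission } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" }); // placeholder region

async function listAllAccountPermissions(): Promise<Permission[]> {
  const permissions: Permission[] = [];
  let nextToken: string | undefined;
  do {
    // The first call sends nextToken as undefined; later calls pass the token from the previous page.
    const page = await client.send(new ListAccountPermissionsCommand({ maxResults: 50, nextToken }));
    permissions.push(...(page.permissions ?? []));
    nextToken = page.nextToken;
  } while (nextToken);
  return permissions;
}

// Equivalent, using the generator added later in this patch:
//   for await (const page of paginateListAccountPermissions({ client }, {})) { ... }
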

                                  The maximum number of results to return in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; + + /** + *

                                  An object that contains details on the filters to apply to the coverage data for your environment.

                                  + */ + filterCriteria?: CoverageFilterCriteria; +} + +export namespace ListCoverageRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListCoverageRequest): any => ({ + ...obj, + }); +} + +export interface ListCoverageResponse { + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; + + /** + *

                                  An object that contains details on the covered resources in your environment.

                                  + */ + coveredResources?: CoveredResource[]; +} + +export namespace ListCoverageResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListCoverageResponse): any => ({ + ...obj, + }); +} + +export interface ListCoverageStatisticsRequest { + /** + *

                                  An object that contains details on the filters to apply to the coverage data for your environment.

                                  + */ + filterCriteria?: CoverageFilterCriteria; + + /** + *

                                  The value to group the results by.

                                  + */ + groupBy?: GroupKey | string; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListCoverageStatisticsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListCoverageStatisticsRequest): any => ({ + ...obj, + }); +} + +export interface ListCoverageStatisticsResponse { + /** + *

                                  An array containing the count of covered resources for each group.

                                  + */ + countsByGroup?: Counts[]; + + /** + *

                                  The total number for all groups.

                                  + */ + totalCounts: number | undefined; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListCoverageStatisticsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListCoverageStatisticsResponse): any => ({ + ...obj, + }); +} + +export interface ListDelegatedAdminAccountsRequest { + /** + *
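
A sketch of grouping coverage counts with ListCoverageStatisticsCommand (the command imported by the paginator later in this patch); the groupBy value is assumed to be a valid GroupKey member, since that enum is defined earlier in this file.

import { Inspector2Client, ListCoverageStatisticsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function printCoverageByResourceType(): Promise<void> {
  const response = await client.send(
    new ListCoverageStatisticsCommand({
      groupBy: "RESOURCE_TYPE", // assumed GroupKey member; GroupKey is defined earlier in this file
    })
  );
  console.log("Total covered resources:", response.totalCounts);
  for (const group of response.countsByGroup ?? []) {
    console.log(group); // each entry is a Counts value (defined earlier in this file)
  }
}
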

                                  The maximum number of results to return in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListDelegatedAdminAccountsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListDelegatedAdminAccountsRequest): any => ({ + ...obj, + }); +} + +export interface ListDelegatedAdminAccountsResponse { + /** + *

                                  Details of the Amazon Inspector delegated administrator of your organization.

                                  + */ + delegatedAdminAccounts?: DelegatedAdminAccount[]; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListDelegatedAdminAccountsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListDelegatedAdminAccountsResponse): any => ({ + ...obj, + }); +} + +export interface ListFiltersRequest { + /** + *

                                  The Amazon resource number (ARN) of the filter.

                                  + */ + arns?: string[]; + + /** + *

                                  The action the filter applies to matched findings.

                                  + */ + action?: FilterAction | string; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; + + /** + *

                                  The maximum number of results to return in the response.

                                  + */ + maxResults?: number; +} + +export namespace ListFiltersRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListFiltersRequest): any => ({ + ...obj, + }); +} + +export interface ListFiltersResponse { + /** + *

                                  Contains details on the filters associated with your account.

                                  + */ + filters: Filter[] | undefined; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListFiltersResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListFiltersResponse): any => ({ + ...obj, + }); +} + +export interface ListFindingAggregationsRequest { + /** + *

                                  The type of the aggregation request.

                                  + */ + aggregationType: AggregationType | string | undefined; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; + + /** + *

                                  The maximum number of results to return in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  The Amazon Web Services account IDs to retrieve finding aggregation data for.

                                  + */ + accountIds?: StringFilter[]; + + /** + *

                                  Details of the aggregation request that is used to filter your aggregation results.

                                  + */ + aggregationRequest?: AggregationRequest; +} + +export namespace ListFindingAggregationsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListFindingAggregationsRequest): any => ({ + ...obj, + ...(obj.aggregationRequest && { + aggregationRequest: AggregationRequest.filterSensitiveLog(obj.aggregationRequest), + }), + }); +} + +export interface ListFindingAggregationsResponse { + /** + *

                                  The type of aggregation to perform.

                                  + */ + aggregationType: AggregationType | string | undefined; + + /** + *

                                  Objects that contain the results of an aggregation operation.

                                  + */ + responses?: AggregationResponse[]; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListFindingAggregationsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListFindingAggregationsResponse): any => ({ + ...obj, + ...(obj.responses && { responses: obj.responses.map((item) => AggregationResponse.filterSensitiveLog(item)) }), + }); +} + +export enum SortField { + AWS_ACCOUNT_ID = "AWS_ACCOUNT_ID", + COMPONENT_TYPE = "COMPONENT_TYPE", + ECR_IMAGE_PUSHED_AT = "ECR_IMAGE_PUSHED_AT", + ECR_IMAGE_REGISTRY = "ECR_IMAGE_REGISTRY", + ECR_IMAGE_REPOSITORY_NAME = "ECR_IMAGE_REPOSITORY_NAME", + FINDING_STATUS = "FINDING_STATUS", + FINDING_TYPE = "FINDING_TYPE", + FIRST_OBSERVED_AT = "FIRST_OBSERVED_AT", + INSPECTOR_SCORE = "INSPECTOR_SCORE", + LAST_OBSERVED_AT = "LAST_OBSERVED_AT", + NETWORK_PROTOCOL = "NETWORK_PROTOCOL", + RESOURCE_TYPE = "RESOURCE_TYPE", + SEVERITY = "SEVERITY", + VENDOR_SEVERITY = "VENDOR_SEVERITY", + VULNERABILITY_ID = "VULNERABILITY_ID", + VULNERABILITY_SOURCE = "VULNERABILITY_SOURCE", +} + +/** + *

                                  Details about the criteria used to sort finding results.

                                  + */ +export interface SortCriteria { + /** + *

                                  The finding detail field by which results are sorted.

                                  + */ + field: SortField | string | undefined; + + /** + *

                                  The order by which findings are sorted.

                                  + */ + sortOrder: SortOrder | string | undefined; +} + +export namespace SortCriteria { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SortCriteria): any => ({ + ...obj, + }); +} + +export interface ListFindingsRequest { + /** + *

                                  The maximum number of results to return in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; + + /** + *

                                  Details on the filters to apply to your finding results.

                                  + */ + filterCriteria?: FilterCriteria; + + /** + *

                                  Details on the sort criteria to apply to your finding results.

                                  + */ + sortCriteria?: SortCriteria; +} + +export namespace ListFindingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListFindingsRequest): any => ({ + ...obj, + }); +} + +export interface ListFindingsResponse { + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; + + /** + *

                                  Contains details on the findings in your environment.

                                  + */ + findings?: Finding[]; +} + +export namespace ListFindingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListFindingsResponse): any => ({ + ...obj, + }); +} + +export interface ListMembersRequest { + /** + *
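
A sketch that lists findings sorted by severity, using the SortCriteria and ListFindingsRequest shapes above together with ListFindingsCommand (imported by the paginator later in this patch). The sortOrder value is an assumption, since SortOrder is defined earlier in this file.

import { Inspector2Client, ListFindingsCommand, SortField } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function printTopFindings(): Promise<void> {
  const response = await client.send(
    new ListFindingsCommand({
      maxResults: 25,
      sortCriteria: {
        field: SortField.SEVERITY,
        sortOrder: "DESC", // assumed SortOrder member; that enum is defined earlier in this file
      },
    })
  );
  for (const finding of response.findings ?? []) {
    console.log(finding.severity, finding.type, finding.title ?? finding.findingArn);
  }
}
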

                                  Specifies whether to list only currently associated members if True or to list all members within the organization if False.

                                  + */ + onlyAssociated?: boolean; + + /** + *

                                  The maximum number of results to return in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; +} + +export namespace ListMembersRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListMembersRequest): any => ({ + ...obj, + }); +} + +export interface ListMembersResponse { + /** + *

                                  An object that contains details for each member account.

                                  + */ + members?: Member[]; + + /** + *

                                  The pagination parameter to be used on the next list operation to retrieve more items.

                                  + */ + nextToken?: string; +} + +export namespace ListMembersResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListMembersResponse): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceRequest { + /** + *

                                  The Amazon resource number (ARN) of the resource to list tags of.

                                  + */ + resourceArn: string | undefined; +} + +export namespace ListTagsForResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceRequest): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceResponse { + /** + *

                                  The tags associated with the resource.

                                  + */ + tags?: { [key: string]: string }; +} + +export namespace ListTagsForResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceResponse): any => ({ + ...obj, + }); +} + +export interface ListUsageTotalsRequest { + /** + *

                                  The maximum number of results to return in the response.

                                  + */ + maxResults?: number; + + /** + *

                                  A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                  + */ + nextToken?: string; + + /** + *

                                  The Amazon Web Services account IDs to retrieve usage totals for.

                                  + */ + accountIds?: string[]; +} + +export namespace ListUsageTotalsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListUsageTotalsRequest): any => ({ + ...obj, + }); +} + +export enum UsageType { + EC2_INSTANCE_HOURS = "EC2_INSTANCE_HOURS", + ECR_INITIAL_SCAN = "ECR_INITIAL_SCAN", + ECR_RESCAN = "ECR_RESCAN", +} + +/** + *

                                  Contains usage information about the cost of Amazon Inspector operations.

                                  + */ +export interface Usage { + /** + *

                                  The scan type.

                                  + */ + type?: UsageType | string; + + /** + *

                                  The total amount of usage.

                                  + */ + total?: number; + + /** + *

                                  The estimated monthly cost of Amazon Inspector.

                                  + */ + estimatedMonthlyCost?: number; + + /** + *

                                  The currency type used when calculating usage data.

                                  + */ + currency?: Currency | string; +} + +export namespace Usage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Usage): any => ({ + ...obj, + }); +} + +/** + *

                                  The total of usage for an account ID.

                                  + */ +export interface UsageTotal { + /** + *

                                  The account ID of the account that usage data was retrieved for.

                                  + */ + accountId?: string; + + /** + *

                                  An object representing the total usage for an account.

                                  + */ + usage?: Usage[]; +} + +export namespace UsageTotal { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UsageTotal): any => ({ + ...obj, + }); +} + +export interface ListUsageTotalsResponse { + /** + *

                                  The pagination parameter to be used on the next list operation to retrieve more items.

                                  + */ + nextToken?: string; + + /** + *

                                  An object with details on the total usage for the requested account.

                                  + */ + totals?: UsageTotal[]; +} + +export namespace ListUsageTotalsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListUsageTotalsResponse): any => ({ + ...obj, + }); +} + +export interface TagResourceRequest { + /** + *
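
A small helper sketch that works purely with the response shapes above, summing the estimated monthly cost across all returned usage totals.

import { ListUsageTotalsResponse } from "@aws-sdk/client-inspector2";

// Sums estimatedMonthlyCost across every account and usage type on one
// ListUsageTotalsResponse page; missing values are treated as zero.
function sumEstimatedMonthlyCost(response: ListUsageTotalsResponse): number {
  let total = 0;
  for (const accountTotal of response.totals ?? []) {
    for (const usage of accountTotal.usage ?? []) {
      total += usage.estimatedMonthlyCost ?? 0;
    }
  }
  return total;
}
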

                                  The Amazon Resource Name (ARN) of the resource to apply a tag to.

                                  + */ + resourceArn: string | undefined; + + /** + *

                                  The tags to be added to a resource.

                                  + */ + tags: { [key: string]: string } | undefined; +} + +export namespace TagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ + ...obj, + }); +} + +export interface TagResourceResponse {} + +export namespace TagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UntagResourceRequest { + /** + *

                                  The Amazon Resource Name (ARN) for the resource to remove tags from.

                                  + */ + resourceArn: string | undefined; + + /** + *

                                  The tag keys to remove from the resource.

                                  + */ + tagKeys: string[] | undefined; +} + +export namespace UntagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ + ...obj, + }); +} + +export interface UntagResourceResponse {} + +export namespace UntagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UpdateFilterRequest { + /** + *

                                  Specifies the action that is to be applied to the findings that match the filter.

                                  + */ + action?: FilterAction | string; + + /** + *

                                  A description of the filter.

                                  + */ + description?: string; + + /** + *

                                  Defines the criteria to be updated in the filter.

                                  + */ + filterCriteria?: FilterCriteria; + + /** + *

                                  The name of the filter.

                                  + */ + name?: string; + + /** + *

                                  The Amazon Resource Number (ARN) of the filter to update.

                                  + */ + filterArn: string | undefined; +} + +export namespace UpdateFilterRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateFilterRequest): any => ({ + ...obj, + }); +} + +export interface UpdateFilterResponse { + /** + *

                                  The Amazon Resource Number (ARN) of the successfully updated filter.

                                  + */ + arn: string | undefined; +} + +export namespace UpdateFilterResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateFilterResponse): any => ({ + ...obj, + }); +} + +export interface UpdateOrganizationConfigurationRequest { + /** + *

                                  Defines which scan types are enabled automatically for new members of your Amazon Inspector organization.

                                  + */ + autoEnable: AutoEnable | undefined; +} + +export namespace UpdateOrganizationConfigurationRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateOrganizationConfigurationRequest): any => ({ + ...obj, + }); +} + +export interface UpdateOrganizationConfigurationResponse { + /** + *

                                  The updated status of scan types automatically enabled for new members of your Amazon Inspector organization.

                                  + */ + autoEnable: AutoEnable | undefined; +} + +export namespace UpdateOrganizationConfigurationResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateOrganizationConfigurationResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-inspector2/src/pagination/Interfaces.ts b/clients/client-inspector2/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..f396c7339a02 --- /dev/null +++ b/clients/client-inspector2/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; + +export interface Inspector2PaginationConfiguration extends PaginationConfiguration { + client: Inspector2 | Inspector2Client; +} diff --git a/clients/client-inspector2/src/pagination/ListAccountPermissionsPaginator.ts b/clients/client-inspector2/src/pagination/ListAccountPermissionsPaginator.ts new file mode 100644 index 000000000000..62538c6bca1d --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListAccountPermissionsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListAccountPermissionsCommand, + ListAccountPermissionsCommandInput, + ListAccountPermissionsCommandOutput, +} from "../commands/ListAccountPermissionsCommand"; +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListAccountPermissionsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListAccountPermissionsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListAccountPermissionsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listAccountPermissions(input, ...args); +}; +export async function* paginateListAccountPermissions( + config: Inspector2PaginationConfiguration, + input: ListAccountPermissionsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListAccountPermissionsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Inspector2 | Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/ListCoveragePaginator.ts b/clients/client-inspector2/src/pagination/ListCoveragePaginator.ts new file mode 100644 index 000000000000..5c3b7c85740b --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListCoveragePaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListCoverageCommand, + ListCoverageCommandInput, + ListCoverageCommandOutput, +} from "../commands/ListCoverageCommand"; +import { Inspector2 } 
from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListCoverageCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListCoverageCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListCoverageCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listCoverage(input, ...args); +}; +export async function* paginateListCoverage( + config: Inspector2PaginationConfiguration, + input: ListCoverageCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListCoverageCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Inspector2 | Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/ListCoverageStatisticsPaginator.ts b/clients/client-inspector2/src/pagination/ListCoverageStatisticsPaginator.ts new file mode 100644 index 000000000000..69cf8cec2166 --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListCoverageStatisticsPaginator.ts @@ -0,0 +1,58 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListCoverageStatisticsCommand, + ListCoverageStatisticsCommandInput, + ListCoverageStatisticsCommandOutput, +} from "../commands/ListCoverageStatisticsCommand"; +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListCoverageStatisticsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListCoverageStatisticsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListCoverageStatisticsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listCoverageStatistics(input, ...args); +}; +export async function* paginateListCoverageStatistics( + config: Inspector2PaginationConfiguration, + input: ListCoverageStatisticsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListCoverageStatisticsCommandOutput; + while (hasNext) { + input.nextToken = token; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid 
client, expected Inspector2 | Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/ListDelegatedAdminAccountsPaginator.ts b/clients/client-inspector2/src/pagination/ListDelegatedAdminAccountsPaginator.ts new file mode 100644 index 000000000000..bc986d06c9d8 --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListDelegatedAdminAccountsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListDelegatedAdminAccountsCommand, + ListDelegatedAdminAccountsCommandInput, + ListDelegatedAdminAccountsCommandOutput, +} from "../commands/ListDelegatedAdminAccountsCommand"; +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListDelegatedAdminAccountsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListDelegatedAdminAccountsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListDelegatedAdminAccountsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listDelegatedAdminAccounts(input, ...args); +}; +export async function* paginateListDelegatedAdminAccounts( + config: Inspector2PaginationConfiguration, + input: ListDelegatedAdminAccountsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListDelegatedAdminAccountsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Inspector2 | Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/ListFiltersPaginator.ts b/clients/client-inspector2/src/pagination/ListFiltersPaginator.ts new file mode 100644 index 000000000000..cc223fed90f2 --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListFiltersPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { ListFiltersCommand, ListFiltersCommandInput, ListFiltersCommandOutput } from "../commands/ListFiltersCommand"; +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListFiltersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListFiltersCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListFiltersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listFilters(input, ...args); +}; +export async function* 
paginateListFilters( + config: Inspector2PaginationConfiguration, + input: ListFiltersCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListFiltersCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Inspector2 | Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/ListFindingAggregationsPaginator.ts b/clients/client-inspector2/src/pagination/ListFindingAggregationsPaginator.ts new file mode 100644 index 000000000000..5f62dd2765df --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListFindingAggregationsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListFindingAggregationsCommand, + ListFindingAggregationsCommandInput, + ListFindingAggregationsCommandOutput, +} from "../commands/ListFindingAggregationsCommand"; +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListFindingAggregationsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListFindingAggregationsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListFindingAggregationsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listFindingAggregations(input, ...args); +}; +export async function* paginateListFindingAggregations( + config: Inspector2PaginationConfiguration, + input: ListFindingAggregationsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListFindingAggregationsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Inspector2 | Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/ListFindingsPaginator.ts b/clients/client-inspector2/src/pagination/ListFindingsPaginator.ts new file mode 100644 index 000000000000..82790b454fa3 --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListFindingsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListFindingsCommand, + ListFindingsCommandInput, + 
ListFindingsCommandOutput, +} from "../commands/ListFindingsCommand"; +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListFindingsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListFindingsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListFindingsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listFindings(input, ...args); +}; +export async function* paginateListFindings( + config: Inspector2PaginationConfiguration, + input: ListFindingsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListFindingsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Inspector2 | Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/ListMembersPaginator.ts b/clients/client-inspector2/src/pagination/ListMembersPaginator.ts new file mode 100644 index 000000000000..e8b3b59ab216 --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListMembersPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { ListMembersCommand, ListMembersCommandInput, ListMembersCommandOutput } from "../commands/ListMembersCommand"; +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListMembersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListMembersCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListMembersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listMembers(input, ...args); +}; +export async function* paginateListMembers( + config: Inspector2PaginationConfiguration, + input: ListMembersCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListMembersCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Inspector2 | 
Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/ListUsageTotalsPaginator.ts b/clients/client-inspector2/src/pagination/ListUsageTotalsPaginator.ts new file mode 100644 index 000000000000..aeb33d284a5f --- /dev/null +++ b/clients/client-inspector2/src/pagination/ListUsageTotalsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListUsageTotalsCommand, + ListUsageTotalsCommandInput, + ListUsageTotalsCommandOutput, +} from "../commands/ListUsageTotalsCommand"; +import { Inspector2 } from "../Inspector2"; +import { Inspector2Client } from "../Inspector2Client"; +import { Inspector2PaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: Inspector2Client, + input: ListUsageTotalsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListUsageTotalsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Inspector2, + input: ListUsageTotalsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listUsageTotals(input, ...args); +}; +export async function* paginateListUsageTotals( + config: Inspector2PaginationConfiguration, + input: ListUsageTotalsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListUsageTotalsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof Inspector2) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof Inspector2Client) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Inspector2 | Inspector2Client"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-inspector2/src/pagination/index.ts b/clients/client-inspector2/src/pagination/index.ts new file mode 100644 index 000000000000..9580af7753ca --- /dev/null +++ b/clients/client-inspector2/src/pagination/index.ts @@ -0,0 +1,10 @@ +export * from "./Interfaces"; +export * from "./ListAccountPermissionsPaginator"; +export * from "./ListCoveragePaginator"; +export * from "./ListCoverageStatisticsPaginator"; +export * from "./ListDelegatedAdminAccountsPaginator"; +export * from "./ListFiltersPaginator"; +export * from "./ListFindingAggregationsPaginator"; +export * from "./ListFindingsPaginator"; +export * from "./ListMembersPaginator"; +export * from "./ListUsageTotalsPaginator"; diff --git a/clients/client-inspector2/src/protocols/Aws_restJson1.ts b/clients/client-inspector2/src/protocols/Aws_restJson1.ts new file mode 100644 index 000000000000..2a2bf1fb5ebc --- /dev/null +++ b/clients/client-inspector2/src/protocols/Aws_restJson1.ts @@ -0,0 +1,5747 @@ +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { + expectBoolean as __expectBoolean, + expectInt32 as __expectInt32, + expectLong as __expectLong, + expectNonNull as __expectNonNull, + expectNumber as __expectNumber, + expectObject as __expectObject, + expectString as 
__expectString, + expectUnion as __expectUnion, + extendedEncodeURIComponent as __extendedEncodeURIComponent, + limitedParseDouble as __limitedParseDouble, + parseEpochTimestamp as __parseEpochTimestamp, + serializeFloat as __serializeFloat, + strictParseInt32 as __strictParseInt32, +} from "@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + MetadataBearer as __MetadataBearer, + ResponseMetadata as __ResponseMetadata, + SerdeContext as __SerdeContext, + SmithyException as __SmithyException, +} from "@aws-sdk/types"; +import { v4 as generateIdempotencyToken } from "uuid"; + +import { AssociateMemberCommandInput, AssociateMemberCommandOutput } from "../commands/AssociateMemberCommand"; +import { + BatchGetAccountStatusCommandInput, + BatchGetAccountStatusCommandOutput, +} from "../commands/BatchGetAccountStatusCommand"; +import { + BatchGetFreeTrialInfoCommandInput, + BatchGetFreeTrialInfoCommandOutput, +} from "../commands/BatchGetFreeTrialInfoCommand"; +import { + CancelFindingsReportCommandInput, + CancelFindingsReportCommandOutput, +} from "../commands/CancelFindingsReportCommand"; +import { CreateFilterCommandInput, CreateFilterCommandOutput } from "../commands/CreateFilterCommand"; +import { + CreateFindingsReportCommandInput, + CreateFindingsReportCommandOutput, +} from "../commands/CreateFindingsReportCommand"; +import { DeleteFilterCommandInput, DeleteFilterCommandOutput } from "../commands/DeleteFilterCommand"; +import { + DescribeOrganizationConfigurationCommandInput, + DescribeOrganizationConfigurationCommandOutput, +} from "../commands/DescribeOrganizationConfigurationCommand"; +import { DisableCommandInput, DisableCommandOutput } from "../commands/DisableCommand"; +import { + DisableDelegatedAdminAccountCommandInput, + DisableDelegatedAdminAccountCommandOutput, +} from "../commands/DisableDelegatedAdminAccountCommand"; +import { DisassociateMemberCommandInput, DisassociateMemberCommandOutput } from "../commands/DisassociateMemberCommand"; +import { EnableCommandInput, EnableCommandOutput } from "../commands/EnableCommand"; +import { + EnableDelegatedAdminAccountCommandInput, + EnableDelegatedAdminAccountCommandOutput, +} from "../commands/EnableDelegatedAdminAccountCommand"; +import { + GetDelegatedAdminAccountCommandInput, + GetDelegatedAdminAccountCommandOutput, +} from "../commands/GetDelegatedAdminAccountCommand"; +import { + GetFindingsReportStatusCommandInput, + GetFindingsReportStatusCommandOutput, +} from "../commands/GetFindingsReportStatusCommand"; +import { GetMemberCommandInput, GetMemberCommandOutput } from "../commands/GetMemberCommand"; +import { + ListAccountPermissionsCommandInput, + ListAccountPermissionsCommandOutput, +} from "../commands/ListAccountPermissionsCommand"; +import { ListCoverageCommandInput, ListCoverageCommandOutput } from "../commands/ListCoverageCommand"; +import { + ListCoverageStatisticsCommandInput, + ListCoverageStatisticsCommandOutput, +} from "../commands/ListCoverageStatisticsCommand"; +import { + ListDelegatedAdminAccountsCommandInput, + ListDelegatedAdminAccountsCommandOutput, +} from "../commands/ListDelegatedAdminAccountsCommand"; +import { ListFiltersCommandInput, ListFiltersCommandOutput } from "../commands/ListFiltersCommand"; +import { + ListFindingAggregationsCommandInput, + ListFindingAggregationsCommandOutput, +} from "../commands/ListFindingAggregationsCommand"; +import { ListFindingsCommandInput, ListFindingsCommandOutput } from "../commands/ListFindingsCommand"; +import { ListMembersCommandInput, 
ListMembersCommandOutput } from "../commands/ListMembersCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { ListUsageTotalsCommandInput, ListUsageTotalsCommandOutput } from "../commands/ListUsageTotalsCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { UpdateFilterCommandInput, UpdateFilterCommandOutput } from "../commands/UpdateFilterCommand"; +import { + UpdateOrganizationConfigurationCommandInput, + UpdateOrganizationConfigurationCommandOutput, +} from "../commands/UpdateOrganizationConfigurationCommand"; +import { + AccessDeniedException, + Account, + AccountAggregation, + AccountAggregationResponse, + AccountState, + AggregationRequest, + AggregationResponse, + AmiAggregation, + AmiAggregationResponse, + AutoEnable, + AwsEc2InstanceDetails, + AwsEcrContainerAggregation, + AwsEcrContainerAggregationResponse, + AwsEcrContainerImageDetails, + BadRequestException, + ConflictException, + Counts, + CoverageFilterCriteria, + CoverageMapFilter, + CoverageStringFilter, + CoveredResource, + CvssScore, + CvssScoreAdjustment, + CvssScoreDetails, + DateFilter, + DelegatedAdmin, + DelegatedAdminAccount, + Destination, + Ec2InstanceAggregation, + Ec2InstanceAggregationResponse, + Ec2Metadata, + EcrContainerImageMetadata, + EcrRepositoryMetadata, + FailedAccount, + Filter, + FilterCriteria, + Finding, + FindingTypeAggregation, + FindingTypeAggregationResponse, + FreeTrialAccountInfo, + FreeTrialInfo, + FreeTrialInfoError, + ImageLayerAggregation, + ImageLayerAggregationResponse, + InspectorScoreDetails, + InternalServerException, + MapFilter, + Member, + NetworkPath, + NetworkReachabilityDetails, + NumberFilter, + PackageAggregation, + PackageAggregationResponse, + PackageFilter, + PackageVulnerabilityDetails, + Permission, + PortRange, + PortRangeFilter, + Recommendation, + Remediation, + RepositoryAggregation, + RepositoryAggregationResponse, + Resource, + ResourceDetails, + ResourceNotFoundException, + ResourceScanMetadata, + ResourceScanType, + ResourceState, + ResourceStatus, + ScanStatus, + ServiceQuotaExceededException, + SeverityCounts, + SortCriteria, + State, + Step, + StringFilter, + ThrottlingException, + TitleAggregation, + TitleAggregationResponse, + Usage, + UsageTotal, + ValidationException, + ValidationExceptionField, + VulnerablePackage, +} from "../models/models_0"; + +export const serializeAws_restJson1AssociateMemberCommand = async ( + input: AssociateMemberCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/members/associate"; + let body: any; + body = JSON.stringify({ + ...(input.accountId !== undefined && input.accountId !== null && { accountId: input.accountId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1BatchGetAccountStatusCommand = async ( + input: BatchGetAccountStatusCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/status/batch/get"; + let body: any; + body = JSON.stringify({ + ...(input.accountIds !== undefined && + input.accountIds !== null && { accountIds: serializeAws_restJson1AccountIdSet(input.accountIds, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1BatchGetFreeTrialInfoCommand = async ( + input: BatchGetFreeTrialInfoCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/freetrialinfo/batchget"; + let body: any; + body = JSON.stringify({ + ...(input.accountIds !== undefined && + input.accountIds !== null && { + accountIds: serializeAws_restJson1MeteringAccountIdList(input.accountIds, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CancelFindingsReportCommand = async ( + input: CancelFindingsReportCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/reporting/cancel"; + let body: any; + body = JSON.stringify({ + ...(input.reportId !== undefined && input.reportId !== null && { reportId: input.reportId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateFilterCommand = async ( + input: CreateFilterCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/filters/create"; + let body: any; + body = JSON.stringify({ + ...(input.action !== undefined && input.action !== null && { action: input.action }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.filterCriteria !== undefined && + input.filterCriteria !== null && { + filterCriteria: serializeAws_restJson1FilterCriteria(input.filterCriteria, context), + }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateFindingsReportCommand = async ( + input: CreateFindingsReportCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/reporting/create"; + let body: any; + body = JSON.stringify({ + ...(input.filterCriteria !== undefined && + input.filterCriteria !== null && { + filterCriteria: serializeAws_restJson1FilterCriteria(input.filterCriteria, context), + }), + ...(input.reportFormat !== undefined && input.reportFormat !== null && { reportFormat: input.reportFormat }), + ...(input.s3Destination !== undefined && + input.s3Destination !== null && { + s3Destination: serializeAws_restJson1Destination(input.s3Destination, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteFilterCommand = async ( + input: DeleteFilterCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/filters/delete"; + let body: any; + body = JSON.stringify({ + ...(input.arn !== undefined && input.arn !== null && { arn: input.arn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DescribeOrganizationConfigurationCommand = async ( + input: DescribeOrganizationConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/organizationconfiguration/describe"; + let body: any; + body = ""; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DisableCommand = async ( + input: DisableCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/disable"; + let body: any; + body = JSON.stringify({ + ...(input.accountIds !== undefined && + input.accountIds !== null && { accountIds: serializeAws_restJson1AccountIdSet(input.accountIds, context) }), + ...(input.resourceTypes !== undefined && + input.resourceTypes !== null && { + resourceTypes: serializeAws_restJson1DisableResourceTypeList(input.resourceTypes, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DisableDelegatedAdminAccountCommand = async ( + input: DisableDelegatedAdminAccountCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/delegatedadminaccounts/disable"; + let body: any; + body = JSON.stringify({ + ...(input.delegatedAdminAccountId !== undefined && + input.delegatedAdminAccountId !== null && { delegatedAdminAccountId: input.delegatedAdminAccountId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DisassociateMemberCommand = async ( + input: DisassociateMemberCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/members/disassociate"; + let body: any; + body = JSON.stringify({ + ...(input.accountId !== undefined && input.accountId !== null && { accountId: input.accountId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1EnableCommand = async ( + input: EnableCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/enable"; + let body: any; + body = JSON.stringify({ + ...(input.accountIds !== undefined && + input.accountIds !== null && { accountIds: serializeAws_restJson1AccountIdSet(input.accountIds, context) }), + clientToken: input.clientToken ?? 
generateIdempotencyToken(), + ...(input.resourceTypes !== undefined && + input.resourceTypes !== null && { + resourceTypes: serializeAws_restJson1EnableResourceTypeList(input.resourceTypes, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1EnableDelegatedAdminAccountCommand = async ( + input: EnableDelegatedAdminAccountCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/delegatedadminaccounts/enable"; + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.delegatedAdminAccountId !== undefined && + input.delegatedAdminAccountId !== null && { delegatedAdminAccountId: input.delegatedAdminAccountId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetDelegatedAdminAccountCommand = async ( + input: GetDelegatedAdminAccountCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/delegatedadminaccounts/get"; + let body: any; + body = ""; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetFindingsReportStatusCommand = async ( + input: GetFindingsReportStatusCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/reporting/status/get"; + let body: any; + body = JSON.stringify({ + ...(input.reportId !== undefined && input.reportId !== null && { reportId: input.reportId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetMemberCommand = async ( + input: GetMemberCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/members/get"; + let body: any; + body = JSON.stringify({ + ...(input.accountId !== undefined && input.accountId !== null && { accountId: input.accountId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListAccountPermissionsCommand = async ( + input: ListAccountPermissionsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/accountpermissions/list"; + let body: any; + body = JSON.stringify({ + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + ...(input.service !== undefined && input.service !== null && { service: input.service }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListCoverageCommand = async ( + input: ListCoverageCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/coverage/list"; + let body: any; + body = JSON.stringify({ + ...(input.filterCriteria !== undefined && + input.filterCriteria !== null && { + filterCriteria: serializeAws_restJson1CoverageFilterCriteria(input.filterCriteria, context), + }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListCoverageStatisticsCommand = async ( + input: ListCoverageStatisticsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/coverage/statistics/list"; + let body: any; + body = JSON.stringify({ + ...(input.filterCriteria !== undefined && + input.filterCriteria !== null && { + filterCriteria: serializeAws_restJson1CoverageFilterCriteria(input.filterCriteria, context), + }), + ...(input.groupBy !== undefined && input.groupBy !== null && { groupBy: input.groupBy }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListDelegatedAdminAccountsCommand = async ( + input: ListDelegatedAdminAccountsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/delegatedadminaccounts/list"; + let body: any; + body = JSON.stringify({ + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListFiltersCommand = async ( + input: ListFiltersCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/filters/list"; + let body: any; + body = JSON.stringify({ + ...(input.action !== undefined && input.action !== null && { action: input.action }), + ...(input.arns !== undefined && + input.arns !== null && { arns: serializeAws_restJson1FilterArnList(input.arns, context) }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListFindingAggregationsCommand = async ( + input: ListFindingAggregationsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/findings/aggregation/list"; + let body: any; + body = JSON.stringify({ + ...(input.accountIds !== undefined && + input.accountIds !== null && { accountIds: serializeAws_restJson1StringFilterList(input.accountIds, context) }), + ...(input.aggregationRequest !== undefined && + input.aggregationRequest !== null && { + aggregationRequest: serializeAws_restJson1AggregationRequest(input.aggregationRequest, context), + }), + ...(input.aggregationType !== undefined && + input.aggregationType !== null && { aggregationType: input.aggregationType }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListFindingsCommand = async ( + input: ListFindingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/findings/list"; + let body: any; + body = JSON.stringify({ + ...(input.filterCriteria !== undefined && + input.filterCriteria !== null && { + filterCriteria: serializeAws_restJson1FilterCriteria(input.filterCriteria, context), + }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + ...(input.sortCriteria !== undefined && + input.sortCriteria !== null && { sortCriteria: serializeAws_restJson1SortCriteria(input.sortCriteria, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListMembersCommand = async ( + input: ListMembersCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/members/list"; + let body: any; + body = JSON.stringify({ + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + ...(input.onlyAssociated !== undefined && + input.onlyAssociated !== null && { onlyAssociated: input.onlyAssociated }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListTagsForResourceCommand = async ( + input: ListTagsForResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace("{resourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListUsageTotalsCommand = async ( + input: ListUsageTotalsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/usage/list"; + let body: any; + body = JSON.stringify({ + ...(input.accountIds !== undefined && + input.accountIds !== null && { accountIds: serializeAws_restJson1UsageAccountIdList(input.accountIds, context) }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1TagResourceCommand = async ( + input: TagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace("{resourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + let body: any; + body = JSON.stringify({ + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UntagResourceCommand = async ( + input: UntagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace("{resourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + const query: any = { + ...(input.tagKeys !== undefined && { tagKeys: (input.tagKeys || []).map((_entry) => _entry as any) }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1UpdateFilterCommand = async ( + input: UpdateFilterCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/filters/update"; + let body: any; + body = JSON.stringify({ + ...(input.action !== undefined && input.action !== null && { action: input.action }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.filterArn !== undefined && input.filterArn !== null && { filterArn: input.filterArn }), + ...(input.filterCriteria !== undefined && + input.filterCriteria !== null && { + filterCriteria: serializeAws_restJson1FilterCriteria(input.filterCriteria, context), + }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateOrganizationConfigurationCommand = async ( + input: UpdateOrganizationConfigurationCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/organizationconfiguration/update"; + let body: any; + body = JSON.stringify({ + ...(input.autoEnable !== undefined && + input.autoEnable !== null && { autoEnable: serializeAws_restJson1AutoEnable(input.autoEnable, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const deserializeAws_restJson1AssociateMemberCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1AssociateMemberCommandError(output, context); + } + const contents: AssociateMemberCommandOutput = { + $metadata: deserializeMetadata(output), + accountId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.accountId !== undefined && data.accountId !== null) { + contents.accountId = __expectString(data.accountId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1AssociateMemberCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1BatchGetAccountStatusCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1BatchGetAccountStatusCommandError(output, context); + } + const contents: BatchGetAccountStatusCommandOutput = { + $metadata: deserializeMetadata(output), + accounts: undefined, + 
    failedAccounts: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.accounts !== undefined && data.accounts !== null) {
+    contents.accounts = deserializeAws_restJson1AccountStateList(data.accounts, context);
+  }
+  if (data.failedAccounts !== undefined && data.failedAccounts !== null) {
+    contents.failedAccounts = deserializeAws_restJson1FailedAccountList(data.failedAccounts, context);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1BatchGetAccountStatusCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<BatchGetAccountStatusCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.inspector2#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.inspector2#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ResourceNotFoundException":
+    case "com.amazonaws.inspector2#ResourceNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.inspector2#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.inspector2#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1BatchGetFreeTrialInfoCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<BatchGetFreeTrialInfoCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1BatchGetFreeTrialInfoCommandError(output, context);
+  }
+  const contents: BatchGetFreeTrialInfoCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    accounts: undefined,
+    failedAccounts: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.accounts !== undefined && data.accounts !== null) {
+    contents.accounts = deserializeAws_restJson1FreeTrialAccountInfoList(data.accounts, context);
+  }
+  if (data.failedAccounts !== undefined && data.failedAccounts !== null) {
+    contents.failedAccounts = deserializeAws_restJson1FreeTrialInfoErrorList(data.failedAccounts, context);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1BatchGetFreeTrialInfoCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<BatchGetFreeTrialInfoCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.inspector2#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.inspector2#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.inspector2#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.inspector2#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CancelFindingsReportCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CancelFindingsReportCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CancelFindingsReportCommandError(output, context);
+  }
+  const contents: CancelFindingsReportCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    reportId: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.reportId !== undefined && data.reportId !== null) {
+    contents.reportId = __expectString(data.reportId);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CancelFindingsReportCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CancelFindingsReportCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.inspector2#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.inspector2#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ResourceNotFoundException":
+    case "com.amazonaws.inspector2#ResourceNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.inspector2#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.inspector2#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CreateFilterCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateFilterCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CreateFilterCommandError(output, context);
+  }
+  const contents: CreateFilterCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    arn: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.arn !== undefined && data.arn !== null) {
+    contents.arn = __expectString(data.arn);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CreateFilterCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateFilterCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.inspector2#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "BadRequestException":
+    case "com.amazonaws.inspector2#BadRequestException":
+      response = {
+        ...(await deserializeAws_restJson1BadRequestExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.inspector2#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ServiceQuotaExceededException":
+    case "com.amazonaws.inspector2#ServiceQuotaExceededException":
+      response = {
+        ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.inspector2#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.inspector2#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CreateFindingsReportCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateFindingsReportCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CreateFindingsReportCommandError(output, context);
+  }
+  const contents: CreateFindingsReportCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    reportId: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.reportId !== undefined && data.reportId !== null) {
+    contents.reportId = __expectString(data.reportId);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CreateFindingsReportCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateFindingsReportCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.inspector2#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.inspector2#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ResourceNotFoundException":
+    case "com.amazonaws.inspector2#ResourceNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case
"com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteFilterCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteFilterCommandError(output, context); + } + const contents: DeleteFilterCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteFilterCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || 
errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DescribeOrganizationConfigurationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DescribeOrganizationConfigurationCommandError(output, context); + } + const contents: DescribeOrganizationConfigurationCommandOutput = { + $metadata: deserializeMetadata(output), + autoEnable: undefined, + maxAccountLimitReached: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.autoEnable !== undefined && data.autoEnable !== null) { + contents.autoEnable = deserializeAws_restJson1AutoEnable(data.autoEnable, context); + } + if (data.maxAccountLimitReached !== undefined && data.maxAccountLimitReached !== null) { + contents.maxAccountLimitReached = __expectBoolean(data.maxAccountLimitReached); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DescribeOrganizationConfigurationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DisableCommand = async ( + output: __HttpResponse, + context: 
__SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisableCommandError(output, context); + } + const contents: DisableCommandOutput = { + $metadata: deserializeMetadata(output), + accounts: undefined, + failedAccounts: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.accounts !== undefined && data.accounts !== null) { + contents.accounts = deserializeAws_restJson1AccountList(data.accounts, context); + } + if (data.failedAccounts !== undefined && data.failedAccounts !== null) { + contents.failedAccounts = deserializeAws_restJson1FailedAccountList(data.failedAccounts, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisableCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DisableDelegatedAdminAccountCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisableDelegatedAdminAccountCommandError(output, context); + } + const contents: DisableDelegatedAdminAccountCommandOutput = { + $metadata: deserializeMetadata(output), + delegatedAdminAccountId: 
undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.delegatedAdminAccountId !== undefined && data.delegatedAdminAccountId !== null) { + contents.delegatedAdminAccountId = __expectString(data.delegatedAdminAccountId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisableDelegatedAdminAccountCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.inspector2#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DisassociateMemberCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisassociateMemberCommandError(output, context); + } + const contents: DisassociateMemberCommandOutput = { + $metadata: deserializeMetadata(output), + accountId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.accountId !== undefined && data.accountId !== null) { + contents.accountId 
= __expectString(data.accountId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisassociateMemberCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1EnableCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1EnableCommandError(output, context); + } + const contents: EnableCommandOutput = { + $metadata: deserializeMetadata(output), + accounts: undefined, + failedAccounts: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.accounts !== undefined && data.accounts !== null) { + contents.accounts = deserializeAws_restJson1AccountList(data.accounts, context); + } + if (data.failedAccounts !== undefined && data.failedAccounts !== null) { + contents.failedAccounts = deserializeAws_restJson1FailedAccountList(data.failedAccounts, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1EnableCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await 
deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1EnableDelegatedAdminAccountCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1EnableDelegatedAdminAccountCommandError(output, context); + } + const contents: EnableDelegatedAdminAccountCommandOutput = { + $metadata: deserializeMetadata(output), + delegatedAdminAccountId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.delegatedAdminAccountId !== undefined && data.delegatedAdminAccountId !== null) { + contents.delegatedAdminAccountId = __expectString(data.delegatedAdminAccountId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1EnableDelegatedAdminAccountCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.inspector2#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case 
"InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetDelegatedAdminAccountCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetDelegatedAdminAccountCommandError(output, context); + } + const contents: GetDelegatedAdminAccountCommandOutput = { + $metadata: deserializeMetadata(output), + delegatedAdmin: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.delegatedAdmin !== undefined && data.delegatedAdmin !== null) { + contents.delegatedAdmin = deserializeAws_restJson1DelegatedAdmin(data.delegatedAdmin, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetDelegatedAdminAccountCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await 
deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetFindingsReportStatusCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetFindingsReportStatusCommandError(output, context); + } + const contents: GetFindingsReportStatusCommandOutput = { + $metadata: deserializeMetadata(output), + destination: undefined, + errorCode: undefined, + errorMessage: undefined, + filterCriteria: undefined, + reportId: undefined, + status: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.destination !== undefined && data.destination !== null) { + contents.destination = deserializeAws_restJson1Destination(data.destination, context); + } + if (data.errorCode !== undefined && data.errorCode !== null) { + contents.errorCode = __expectString(data.errorCode); + } + if (data.errorMessage !== undefined && data.errorMessage !== null) { + contents.errorMessage = __expectString(data.errorMessage); + } + if (data.filterCriteria !== undefined && data.filterCriteria !== null) { + contents.filterCriteria = deserializeAws_restJson1FilterCriteria(data.filterCriteria, context); + } + if (data.reportId !== undefined && data.reportId !== null) { + contents.reportId = __expectString(data.reportId); + } + if (data.status !== undefined && data.status !== null) { + contents.status = __expectString(data.status); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetFindingsReportStatusCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case 
"com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetMemberCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetMemberCommandError(output, context); + } + const contents: GetMemberCommandOutput = { + $metadata: deserializeMetadata(output), + member: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.member !== undefined && data.member !== null) { + contents.member = deserializeAws_restJson1Member(data.member, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetMemberCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case 
"com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListAccountPermissionsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListAccountPermissionsCommandError(output, context); + } + const contents: ListAccountPermissionsCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + permissions: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.permissions !== undefined && data.permissions !== null) { + contents.permissions = deserializeAws_restJson1Permissions(data.permissions, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListAccountPermissionsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + 
...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListCoverageCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListCoverageCommandError(output, context); + } + const contents: ListCoverageCommandOutput = { + $metadata: deserializeMetadata(output), + coveredResources: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.coveredResources !== undefined && data.coveredResources !== null) { + contents.coveredResources = deserializeAws_restJson1CoveredResources(data.coveredResources, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListCoverageCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListCoverageStatisticsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListCoverageStatisticsCommandError(output, context); + } + const contents: ListCoverageStatisticsCommandOutput = { + $metadata: deserializeMetadata(output), + countsByGroup: undefined, + nextToken: undefined, + totalCounts: undefined, + }; + const data: { [key: string]: any 
} = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.countsByGroup !== undefined && data.countsByGroup !== null) { + contents.countsByGroup = deserializeAws_restJson1CountsList(data.countsByGroup, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.totalCounts !== undefined && data.totalCounts !== null) { + contents.totalCounts = __expectLong(data.totalCounts); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListCoverageStatisticsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListDelegatedAdminAccountsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListDelegatedAdminAccountsCommandError(output, context); + } + const contents: ListDelegatedAdminAccountsCommandOutput = { + $metadata: deserializeMetadata(output), + delegatedAdminAccounts: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.delegatedAdminAccounts !== undefined && data.delegatedAdminAccounts !== null) { + contents.delegatedAdminAccounts = deserializeAws_restJson1DelegatedAdminAccountList( + data.delegatedAdminAccounts, + context + ); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListDelegatedAdminAccountsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let 
response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListFiltersCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListFiltersCommandError(output, context); + } + const contents: ListFiltersCommandOutput = { + $metadata: deserializeMetadata(output), + filters: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.filters !== undefined && data.filters !== null) { + contents.filters = deserializeAws_restJson1FilterList(data.filters, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListFiltersCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + 
$metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListFindingAggregationsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListFindingAggregationsCommandError(output, context); + } + const contents: ListFindingAggregationsCommandOutput = { + $metadata: deserializeMetadata(output), + aggregationType: undefined, + nextToken: undefined, + responses: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.aggregationType !== undefined && data.aggregationType !== null) { + contents.aggregationType = __expectString(data.aggregationType); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.responses !== undefined && data.responses !== null) { + contents.responses = deserializeAws_restJson1AggregationResponseList(data.responses, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListFindingAggregationsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = 
{ + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListFindingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListFindingsCommandError(output, context); + } + const contents: ListFindingsCommandOutput = { + $metadata: deserializeMetadata(output), + findings: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.findings !== undefined && data.findings !== null) { + contents.findings = deserializeAws_restJson1FindingList(data.findings, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListFindingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListMembersCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListMembersCommandError(output, context); + } + const contents: ListMembersCommandOutput = { + $metadata: deserializeMetadata(output), + members: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.members 
!== undefined && data.members !== null) { + contents.members = deserializeAws_restJson1MemberList(data.members, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListMembersCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTagsForResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTagsForResourceCommandError(output, context); + } + const contents: ListTagsForResourceCommandOutput = { + $metadata: deserializeMetadata(output), + tags: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.tags !== undefined && data.tags !== null) { + contents.tags = deserializeAws_restJson1TagMap(data.tags, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTagsForResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + 
response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListUsageTotalsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListUsageTotalsCommandError(output, context); + } + const contents: ListUsageTotalsCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + totals: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.totals !== undefined && data.totals !== null) { + contents.totals = deserializeAws_restJson1UsageTotalList(data.totals, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListUsageTotalsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + 
$metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1TagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1TagResourceCommandError(output, context); + } + const contents: TagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1TagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequestException": + case "com.amazonaws.inspector2#BadRequestException": + response = { + ...(await deserializeAws_restJson1BadRequestExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new 
Error(message), response)); +}; + +export const deserializeAws_restJson1UntagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UntagResourceCommandError(output, context); + } + const contents: UntagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UntagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateFilterCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateFilterCommandError(output, context); + } + const contents: UpdateFilterCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateFilterCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = 
loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.inspector2#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateOrganizationConfigurationCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateOrganizationConfigurationCommandError(output, context); + } + const contents: UpdateOrganizationConfigurationCommandOutput = { + $metadata: deserializeMetadata(output), + autoEnable: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.autoEnable !== undefined && data.autoEnable !== null) { + contents.autoEnable = deserializeAws_restJson1AutoEnable(data.autoEnable, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateOrganizationConfigurationCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.inspector2#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.inspector2#InternalServerException": + response = { + 
...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.inspector2#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.inspector2#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<AccessDeniedException> => { + const contents: AccessDeniedException = { + name: "AccessDeniedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1BadRequestExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<BadRequestException> => { + const contents: BadRequestException = { + name: "BadRequestException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ConflictExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<ConflictException> => { + const contents: ConflictException = { + name: "ConflictException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + resourceId: undefined, + resourceType: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.resourceId !== undefined && data.resourceId !== null) { + contents.resourceId = __expectString(data.resourceId); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + return contents; +}; + +const deserializeAws_restJson1InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<InternalServerException> => { + const contents: InternalServerException = { + name: "InternalServerException", + $fault: "server", + $retryable: {}, + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + retryAfterSeconds: undefined, + }; + if (parsedOutput.headers["retry-after"] !== undefined) { + contents.retryAfterSeconds = __strictParseInt32(parsedOutput.headers["retry-after"]); + } + const data: any = parsedOutput.body; + if (data.message !== undefined 
&& data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<ResourceNotFoundException> => { + const contents: ResourceNotFoundException = { + name: "ResourceNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ServiceQuotaExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<ServiceQuotaExceededException> => { + const contents: ServiceQuotaExceededException = { + name: "ServiceQuotaExceededException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + resourceId: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.resourceId !== undefined && data.resourceId !== null) { + contents.resourceId = __expectString(data.resourceId); + } + return contents; +}; + +const deserializeAws_restJson1ThrottlingExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<ThrottlingException> => { + const contents: ThrottlingException = { + name: "ThrottlingException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ValidationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise<ValidationException> => { + const contents: ValidationException = { + name: "ValidationException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + fields: undefined, + message: undefined, + reason: undefined, + }; + const data: any = parsedOutput.body; + if (data.fields !== undefined && data.fields !== null) { + contents.fields = deserializeAws_restJson1ValidationExceptionFields(data.fields, context); + } + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.reason !== undefined && data.reason !== null) { + contents.reason = __expectString(data.reason); + } + return contents; +}; + +const serializeAws_restJson1AccountAggregation = (input: AccountAggregation, context: __SerdeContext): any => { + return { + ...(input.findingType !== undefined && input.findingType !== null && { findingType: input.findingType }), + ...(input.resourceType !== undefined && input.resourceType !== null && { resourceType: input.resourceType }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1AccountIdSet = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1AggregationRequest = (input: AggregationRequest, context: __SerdeContext): any => { + return AggregationRequest.visit(input, { + accountAggregation: (value) => ({ accountAggregation: 
serializeAws_restJson1AccountAggregation(value, context) }), + amiAggregation: (value) => ({ amiAggregation: serializeAws_restJson1AmiAggregation(value, context) }), + awsEcrContainerAggregation: (value) => ({ + awsEcrContainerAggregation: serializeAws_restJson1AwsEcrContainerAggregation(value, context), + }), + ec2InstanceAggregation: (value) => ({ + ec2InstanceAggregation: serializeAws_restJson1Ec2InstanceAggregation(value, context), + }), + findingTypeAggregation: (value) => ({ + findingTypeAggregation: serializeAws_restJson1FindingTypeAggregation(value, context), + }), + imageLayerAggregation: (value) => ({ + imageLayerAggregation: serializeAws_restJson1ImageLayerAggregation(value, context), + }), + packageAggregation: (value) => ({ packageAggregation: serializeAws_restJson1PackageAggregation(value, context) }), + repositoryAggregation: (value) => ({ + repositoryAggregation: serializeAws_restJson1RepositoryAggregation(value, context), + }), + titleAggregation: (value) => ({ titleAggregation: serializeAws_restJson1TitleAggregation(value, context) }), + _: (name, value) => ({ name: value } as any), + }); +}; + +const serializeAws_restJson1AmiAggregation = (input: AmiAggregation, context: __SerdeContext): any => { + return { + ...(input.amis !== undefined && + input.amis !== null && { amis: serializeAws_restJson1StringFilterList(input.amis, context) }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1AutoEnable = (input: AutoEnable, context: __SerdeContext): any => { + return { + ...(input.ec2 !== undefined && input.ec2 !== null && { ec2: input.ec2 }), + ...(input.ecr !== undefined && input.ecr !== null && { ecr: input.ecr }), + }; +}; + +const serializeAws_restJson1AwsEcrContainerAggregation = ( + input: AwsEcrContainerAggregation, + context: __SerdeContext +): any => { + return { + ...(input.architectures !== undefined && + input.architectures !== null && { + architectures: serializeAws_restJson1StringFilterList(input.architectures, context), + }), + ...(input.imageShas !== undefined && + input.imageShas !== null && { imageShas: serializeAws_restJson1StringFilterList(input.imageShas, context) }), + ...(input.imageTags !== undefined && + input.imageTags !== null && { imageTags: serializeAws_restJson1StringFilterList(input.imageTags, context) }), + ...(input.repositories !== undefined && + input.repositories !== null && { + repositories: serializeAws_restJson1StringFilterList(input.repositories, context), + }), + ...(input.resourceIds !== undefined && + input.resourceIds !== null && { + resourceIds: serializeAws_restJson1StringFilterList(input.resourceIds, context), + }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1CoverageFilterCriteria = (input: CoverageFilterCriteria, context: __SerdeContext): any => { + return { + ...(input.accountId !== undefined && + input.accountId !== null && { + accountId: serializeAws_restJson1CoverageStringFilterList(input.accountId, context), + }), + ...(input.ec2InstanceTags !== undefined && + input.ec2InstanceTags !== null && { + ec2InstanceTags: serializeAws_restJson1CoverageMapFilterList(input.ec2InstanceTags, context), + }), + ...(input.ecrImageTags !== undefined && + input.ecrImageTags !== null && 
{ + ecrImageTags: serializeAws_restJson1CoverageStringFilterList(input.ecrImageTags, context), + }), + ...(input.ecrRepositoryName !== undefined && + input.ecrRepositoryName !== null && { + ecrRepositoryName: serializeAws_restJson1CoverageStringFilterList(input.ecrRepositoryName, context), + }), + ...(input.resourceId !== undefined && + input.resourceId !== null && { + resourceId: serializeAws_restJson1CoverageStringFilterList(input.resourceId, context), + }), + ...(input.resourceType !== undefined && + input.resourceType !== null && { + resourceType: serializeAws_restJson1CoverageStringFilterList(input.resourceType, context), + }), + ...(input.scanStatusCode !== undefined && + input.scanStatusCode !== null && { + scanStatusCode: serializeAws_restJson1CoverageStringFilterList(input.scanStatusCode, context), + }), + ...(input.scanStatusReason !== undefined && + input.scanStatusReason !== null && { + scanStatusReason: serializeAws_restJson1CoverageStringFilterList(input.scanStatusReason, context), + }), + ...(input.scanType !== undefined && + input.scanType !== null && { scanType: serializeAws_restJson1CoverageStringFilterList(input.scanType, context) }), + }; +}; + +const serializeAws_restJson1CoverageMapFilter = (input: CoverageMapFilter, context: __SerdeContext): any => { + return { + ...(input.comparison !== undefined && input.comparison !== null && { comparison: input.comparison }), + ...(input.key !== undefined && input.key !== null && { key: input.key }), + ...(input.value !== undefined && input.value !== null && { value: input.value }), + }; +}; + +const serializeAws_restJson1CoverageMapFilterList = (input: CoverageMapFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1CoverageMapFilter(entry, context); + }); +}; + +const serializeAws_restJson1CoverageStringFilter = (input: CoverageStringFilter, context: __SerdeContext): any => { + return { + ...(input.comparison !== undefined && input.comparison !== null && { comparison: input.comparison }), + ...(input.value !== undefined && input.value !== null && { value: input.value }), + }; +}; + +const serializeAws_restJson1CoverageStringFilterList = ( + input: CoverageStringFilter[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1CoverageStringFilter(entry, context); + }); +}; + +const serializeAws_restJson1DateFilter = (input: DateFilter, context: __SerdeContext): any => { + return { + ...(input.endInclusive !== undefined && + input.endInclusive !== null && { endInclusive: Math.round(input.endInclusive.getTime() / 1000) }), + ...(input.startInclusive !== undefined && + input.startInclusive !== null && { startInclusive: Math.round(input.startInclusive.getTime() / 1000) }), + }; +}; + +const serializeAws_restJson1DateFilterList = (input: DateFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1DateFilter(entry, context); + }); +}; + +const serializeAws_restJson1Destination = (input: Destination, context: __SerdeContext): any => { + return { + ...(input.bucketName !== undefined && input.bucketName !== null && { bucketName: input.bucketName }), + ...(input.keyPrefix !== undefined && input.keyPrefix !== null && { keyPrefix: 
input.keyPrefix }), + ...(input.kmsKeyArn !== undefined && input.kmsKeyArn !== null && { kmsKeyArn: input.kmsKeyArn }), + }; +}; + +const serializeAws_restJson1DisableResourceTypeList = ( + input: (ResourceScanType | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1Ec2InstanceAggregation = (input: Ec2InstanceAggregation, context: __SerdeContext): any => { + return { + ...(input.amis !== undefined && + input.amis !== null && { amis: serializeAws_restJson1StringFilterList(input.amis, context) }), + ...(input.instanceIds !== undefined && + input.instanceIds !== null && { + instanceIds: serializeAws_restJson1StringFilterList(input.instanceIds, context), + }), + ...(input.instanceTags !== undefined && + input.instanceTags !== null && { + instanceTags: serializeAws_restJson1MapFilterList(input.instanceTags, context), + }), + ...(input.operatingSystems !== undefined && + input.operatingSystems !== null && { + operatingSystems: serializeAws_restJson1StringFilterList(input.operatingSystems, context), + }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1EnableResourceTypeList = ( + input: (ResourceScanType | string)[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1FilterArnList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1FilterCriteria = (input: FilterCriteria, context: __SerdeContext): any => { + return { + ...(input.awsAccountId !== undefined && + input.awsAccountId !== null && { + awsAccountId: serializeAws_restJson1StringFilterList(input.awsAccountId, context), + }), + ...(input.componentId !== undefined && + input.componentId !== null && { + componentId: serializeAws_restJson1StringFilterList(input.componentId, context), + }), + ...(input.componentType !== undefined && + input.componentType !== null && { + componentType: serializeAws_restJson1StringFilterList(input.componentType, context), + }), + ...(input.ec2InstanceImageId !== undefined && + input.ec2InstanceImageId !== null && { + ec2InstanceImageId: serializeAws_restJson1StringFilterList(input.ec2InstanceImageId, context), + }), + ...(input.ec2InstanceSubnetId !== undefined && + input.ec2InstanceSubnetId !== null && { + ec2InstanceSubnetId: serializeAws_restJson1StringFilterList(input.ec2InstanceSubnetId, context), + }), + ...(input.ec2InstanceVpcId !== undefined && + input.ec2InstanceVpcId !== null && { + ec2InstanceVpcId: serializeAws_restJson1StringFilterList(input.ec2InstanceVpcId, context), + }), + ...(input.ecrImageArchitecture !== undefined && + input.ecrImageArchitecture !== null && { + ecrImageArchitecture: serializeAws_restJson1StringFilterList(input.ecrImageArchitecture, context), + }), + ...(input.ecrImageHash !== undefined && + input.ecrImageHash !== null && { + ecrImageHash: serializeAws_restJson1StringFilterList(input.ecrImageHash, context), + }), + ...(input.ecrImagePushedAt !== undefined && + input.ecrImagePushedAt !== null 
&& { + ecrImagePushedAt: serializeAws_restJson1DateFilterList(input.ecrImagePushedAt, context), + }), + ...(input.ecrImageRegistry !== undefined && + input.ecrImageRegistry !== null && { + ecrImageRegistry: serializeAws_restJson1StringFilterList(input.ecrImageRegistry, context), + }), + ...(input.ecrImageRepositoryName !== undefined && + input.ecrImageRepositoryName !== null && { + ecrImageRepositoryName: serializeAws_restJson1StringFilterList(input.ecrImageRepositoryName, context), + }), + ...(input.ecrImageTags !== undefined && + input.ecrImageTags !== null && { + ecrImageTags: serializeAws_restJson1StringFilterList(input.ecrImageTags, context), + }), + ...(input.findingArn !== undefined && + input.findingArn !== null && { findingArn: serializeAws_restJson1StringFilterList(input.findingArn, context) }), + ...(input.findingStatus !== undefined && + input.findingStatus !== null && { + findingStatus: serializeAws_restJson1StringFilterList(input.findingStatus, context), + }), + ...(input.findingType !== undefined && + input.findingType !== null && { + findingType: serializeAws_restJson1StringFilterList(input.findingType, context), + }), + ...(input.firstObservedAt !== undefined && + input.firstObservedAt !== null && { + firstObservedAt: serializeAws_restJson1DateFilterList(input.firstObservedAt, context), + }), + ...(input.inspectorScore !== undefined && + input.inspectorScore !== null && { + inspectorScore: serializeAws_restJson1NumberFilterList(input.inspectorScore, context), + }), + ...(input.lastObservedAt !== undefined && + input.lastObservedAt !== null && { + lastObservedAt: serializeAws_restJson1DateFilterList(input.lastObservedAt, context), + }), + ...(input.networkProtocol !== undefined && + input.networkProtocol !== null && { + networkProtocol: serializeAws_restJson1StringFilterList(input.networkProtocol, context), + }), + ...(input.portRange !== undefined && + input.portRange !== null && { portRange: serializeAws_restJson1PortRangeFilterList(input.portRange, context) }), + ...(input.relatedVulnerabilities !== undefined && + input.relatedVulnerabilities !== null && { + relatedVulnerabilities: serializeAws_restJson1StringFilterList(input.relatedVulnerabilities, context), + }), + ...(input.resourceId !== undefined && + input.resourceId !== null && { resourceId: serializeAws_restJson1StringFilterList(input.resourceId, context) }), + ...(input.resourceTags !== undefined && + input.resourceTags !== null && { + resourceTags: serializeAws_restJson1MapFilterList(input.resourceTags, context), + }), + ...(input.resourceType !== undefined && + input.resourceType !== null && { + resourceType: serializeAws_restJson1StringFilterList(input.resourceType, context), + }), + ...(input.severity !== undefined && + input.severity !== null && { severity: serializeAws_restJson1StringFilterList(input.severity, context) }), + ...(input.title !== undefined && + input.title !== null && { title: serializeAws_restJson1StringFilterList(input.title, context) }), + ...(input.updatedAt !== undefined && + input.updatedAt !== null && { updatedAt: serializeAws_restJson1DateFilterList(input.updatedAt, context) }), + ...(input.vendorSeverity !== undefined && + input.vendorSeverity !== null && { + vendorSeverity: serializeAws_restJson1StringFilterList(input.vendorSeverity, context), + }), + ...(input.vulnerabilityId !== undefined && + input.vulnerabilityId !== null && { + vulnerabilityId: serializeAws_restJson1StringFilterList(input.vulnerabilityId, context), + }), + ...(input.vulnerabilitySource !== undefined && + 
input.vulnerabilitySource !== null && { + vulnerabilitySource: serializeAws_restJson1StringFilterList(input.vulnerabilitySource, context), + }), + ...(input.vulnerablePackages !== undefined && + input.vulnerablePackages !== null && { + vulnerablePackages: serializeAws_restJson1PackageFilterList(input.vulnerablePackages, context), + }), + }; +}; + +const serializeAws_restJson1FindingTypeAggregation = (input: FindingTypeAggregation, context: __SerdeContext): any => { + return { + ...(input.findingType !== undefined && input.findingType !== null && { findingType: input.findingType }), + ...(input.resourceType !== undefined && input.resourceType !== null && { resourceType: input.resourceType }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1ImageLayerAggregation = (input: ImageLayerAggregation, context: __SerdeContext): any => { + return { + ...(input.layerHashes !== undefined && + input.layerHashes !== null && { + layerHashes: serializeAws_restJson1StringFilterList(input.layerHashes, context), + }), + ...(input.repositories !== undefined && + input.repositories !== null && { + repositories: serializeAws_restJson1StringFilterList(input.repositories, context), + }), + ...(input.resourceIds !== undefined && + input.resourceIds !== null && { + resourceIds: serializeAws_restJson1StringFilterList(input.resourceIds, context), + }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1MapFilter = (input: MapFilter, context: __SerdeContext): any => { + return { + ...(input.comparison !== undefined && input.comparison !== null && { comparison: input.comparison }), + ...(input.key !== undefined && input.key !== null && { key: input.key }), + ...(input.value !== undefined && input.value !== null && { value: input.value }), + }; +}; + +const serializeAws_restJson1MapFilterList = (input: MapFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1MapFilter(entry, context); + }); +}; + +const serializeAws_restJson1MeteringAccountIdList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1NumberFilter = (input: NumberFilter, context: __SerdeContext): any => { + return { + ...(input.lowerInclusive !== undefined && + input.lowerInclusive !== null && { lowerInclusive: __serializeFloat(input.lowerInclusive) }), + ...(input.upperInclusive !== undefined && + input.upperInclusive !== null && { upperInclusive: __serializeFloat(input.upperInclusive) }), + }; +}; + +const serializeAws_restJson1NumberFilterList = (input: NumberFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1NumberFilter(entry, context); + }); +}; + +const serializeAws_restJson1PackageAggregation = (input: PackageAggregation, context: __SerdeContext): any => { + return { + ...(input.packageNames !== undefined && + input.packageNames !== null && { + 
packageNames: serializeAws_restJson1StringFilterList(input.packageNames, context), + }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1PackageFilter = (input: PackageFilter, context: __SerdeContext): any => { + return { + ...(input.architecture !== undefined && + input.architecture !== null && { architecture: serializeAws_restJson1StringFilter(input.architecture, context) }), + ...(input.epoch !== undefined && + input.epoch !== null && { epoch: serializeAws_restJson1NumberFilter(input.epoch, context) }), + ...(input.name !== undefined && + input.name !== null && { name: serializeAws_restJson1StringFilter(input.name, context) }), + ...(input.release !== undefined && + input.release !== null && { release: serializeAws_restJson1StringFilter(input.release, context) }), + ...(input.sourceLayerHash !== undefined && + input.sourceLayerHash !== null && { + sourceLayerHash: serializeAws_restJson1StringFilter(input.sourceLayerHash, context), + }), + ...(input.version !== undefined && + input.version !== null && { version: serializeAws_restJson1StringFilter(input.version, context) }), + }; +}; + +const serializeAws_restJson1PackageFilterList = (input: PackageFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1PackageFilter(entry, context); + }); +}; + +const serializeAws_restJson1PortRangeFilter = (input: PortRangeFilter, context: __SerdeContext): any => { + return { + ...(input.beginInclusive !== undefined && + input.beginInclusive !== null && { beginInclusive: input.beginInclusive }), + ...(input.endInclusive !== undefined && input.endInclusive !== null && { endInclusive: input.endInclusive }), + }; +}; + +const serializeAws_restJson1PortRangeFilterList = (input: PortRangeFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1PortRangeFilter(entry, context); + }); +}; + +const serializeAws_restJson1RepositoryAggregation = (input: RepositoryAggregation, context: __SerdeContext): any => { + return { + ...(input.repositories !== undefined && + input.repositories !== null && { + repositories: serializeAws_restJson1StringFilterList(input.repositories, context), + }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1SortCriteria = (input: SortCriteria, context: __SerdeContext): any => { + return { + ...(input.field !== undefined && input.field !== null && { field: input.field }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + }; +}; + +const serializeAws_restJson1StringFilter = (input: StringFilter, context: __SerdeContext): any => { + return { + ...(input.comparison !== undefined && input.comparison !== null && { comparison: input.comparison }), + ...(input.value !== undefined && input.value !== null && { value: input.value }), + }; +}; + +const serializeAws_restJson1StringFilterList = (input: StringFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if 
(entry === null) { + return null as any; + } + return serializeAws_restJson1StringFilter(entry, context); + }); +}; + +const serializeAws_restJson1TagMap = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1TitleAggregation = (input: TitleAggregation, context: __SerdeContext): any => { + return { + ...(input.resourceType !== undefined && input.resourceType !== null && { resourceType: input.resourceType }), + ...(input.sortBy !== undefined && input.sortBy !== null && { sortBy: input.sortBy }), + ...(input.sortOrder !== undefined && input.sortOrder !== null && { sortOrder: input.sortOrder }), + ...(input.titles !== undefined && + input.titles !== null && { titles: serializeAws_restJson1StringFilterList(input.titles, context) }), + ...(input.vulnerabilityIds !== undefined && + input.vulnerabilityIds !== null && { + vulnerabilityIds: serializeAws_restJson1StringFilterList(input.vulnerabilityIds, context), + }), + }; +}; + +const serializeAws_restJson1UsageAccountIdList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const deserializeAws_restJson1Account = (output: any, context: __SerdeContext): Account => { + return { + accountId: __expectString(output.accountId), + resourceStatus: + output.resourceStatus !== undefined && output.resourceStatus !== null + ? deserializeAws_restJson1ResourceStatus(output.resourceStatus, context) + : undefined, + status: __expectString(output.status), + } as any; +}; + +const deserializeAws_restJson1AccountAggregationResponse = ( + output: any, + context: __SerdeContext +): AccountAggregationResponse => { + return { + accountId: __expectString(output.accountId), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1AccountList = (output: any, context: __SerdeContext): Account[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Account(entry, context); + }); +}; + +const deserializeAws_restJson1AccountState = (output: any, context: __SerdeContext): AccountState => { + return { + accountId: __expectString(output.accountId), + resourceState: + output.resourceState !== undefined && output.resourceState !== null + ? deserializeAws_restJson1ResourceState(output.resourceState, context) + : undefined, + state: + output.state !== undefined && output.state !== null + ? 
deserializeAws_restJson1State(output.state, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1AccountStateList = (output: any, context: __SerdeContext): AccountState[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1AccountState(entry, context); + }); +}; + +const deserializeAws_restJson1AggregationResponse = (output: any, context: __SerdeContext): AggregationResponse => { + if (output.accountAggregation !== undefined && output.accountAggregation !== null) { + return { + accountAggregation: deserializeAws_restJson1AccountAggregationResponse(output.accountAggregation, context), + }; + } + if (output.amiAggregation !== undefined && output.amiAggregation !== null) { + return { + amiAggregation: deserializeAws_restJson1AmiAggregationResponse(output.amiAggregation, context), + }; + } + if (output.awsEcrContainerAggregation !== undefined && output.awsEcrContainerAggregation !== null) { + return { + awsEcrContainerAggregation: deserializeAws_restJson1AwsEcrContainerAggregationResponse( + output.awsEcrContainerAggregation, + context + ), + }; + } + if (output.ec2InstanceAggregation !== undefined && output.ec2InstanceAggregation !== null) { + return { + ec2InstanceAggregation: deserializeAws_restJson1Ec2InstanceAggregationResponse( + output.ec2InstanceAggregation, + context + ), + }; + } + if (output.findingTypeAggregation !== undefined && output.findingTypeAggregation !== null) { + return { + findingTypeAggregation: deserializeAws_restJson1FindingTypeAggregationResponse( + output.findingTypeAggregation, + context + ), + }; + } + if (output.imageLayerAggregation !== undefined && output.imageLayerAggregation !== null) { + return { + imageLayerAggregation: deserializeAws_restJson1ImageLayerAggregationResponse( + output.imageLayerAggregation, + context + ), + }; + } + if (output.packageAggregation !== undefined && output.packageAggregation !== null) { + return { + packageAggregation: deserializeAws_restJson1PackageAggregationResponse(output.packageAggregation, context), + }; + } + if (output.repositoryAggregation !== undefined && output.repositoryAggregation !== null) { + return { + repositoryAggregation: deserializeAws_restJson1RepositoryAggregationResponse( + output.repositoryAggregation, + context + ), + }; + } + if (output.titleAggregation !== undefined && output.titleAggregation !== null) { + return { + titleAggregation: deserializeAws_restJson1TitleAggregationResponse(output.titleAggregation, context), + }; + } + return { $unknown: Object.entries(output)[0] }; +}; + +const deserializeAws_restJson1AggregationResponseList = ( + output: any, + context: __SerdeContext +): AggregationResponse[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1AggregationResponse(__expectUnion(entry), context); + }); +}; + +const deserializeAws_restJson1AmiAggregationResponse = ( + output: any, + context: __SerdeContext +): AmiAggregationResponse => { + return { + accountId: __expectString(output.accountId), + affectedInstances: __expectLong(output.affectedInstances), + ami: __expectString(output.ami), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? 
deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1AutoEnable = (output: any, context: __SerdeContext): AutoEnable => { + return { + ec2: __expectBoolean(output.ec2), + ecr: __expectBoolean(output.ecr), + } as any; +}; + +const deserializeAws_restJson1AwsEc2InstanceDetails = (output: any, context: __SerdeContext): AwsEc2InstanceDetails => { + return { + iamInstanceProfileArn: __expectString(output.iamInstanceProfileArn), + imageId: __expectString(output.imageId), + ipV4Addresses: + output.ipV4Addresses !== undefined && output.ipV4Addresses !== null + ? deserializeAws_restJson1IpV4AddressList(output.ipV4Addresses, context) + : undefined, + ipV6Addresses: + output.ipV6Addresses !== undefined && output.ipV6Addresses !== null + ? deserializeAws_restJson1IpV6AddressList(output.ipV6Addresses, context) + : undefined, + keyName: __expectString(output.keyName), + launchedAt: + output.launchedAt !== undefined && output.launchedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.launchedAt))) + : undefined, + platform: __expectString(output.platform), + subnetId: __expectString(output.subnetId), + type: __expectString(output.type), + vpcId: __expectString(output.vpcId), + } as any; +}; + +const deserializeAws_restJson1AwsEcrContainerAggregationResponse = ( + output: any, + context: __SerdeContext +): AwsEcrContainerAggregationResponse => { + return { + accountId: __expectString(output.accountId), + architecture: __expectString(output.architecture), + imageSha: __expectString(output.imageSha), + imageTags: + output.imageTags !== undefined && output.imageTags !== null + ? deserializeAws_restJson1StringList(output.imageTags, context) + : undefined, + repository: __expectString(output.repository), + resourceId: __expectString(output.resourceId), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1AwsEcrContainerImageDetails = ( + output: any, + context: __SerdeContext +): AwsEcrContainerImageDetails => { + return { + architecture: __expectString(output.architecture), + author: __expectString(output.author), + imageHash: __expectString(output.imageHash), + imageTags: + output.imageTags !== undefined && output.imageTags !== null + ? deserializeAws_restJson1ImageTagList(output.imageTags, context) + : undefined, + platform: __expectString(output.platform), + pushedAt: + output.pushedAt !== undefined && output.pushedAt !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.pushedAt))) + : undefined, + registry: __expectString(output.registry), + repositoryName: __expectString(output.repositoryName), + } as any; +}; + +const deserializeAws_restJson1Counts = (output: any, context: __SerdeContext): Counts => { + return { + count: __expectLong(output.count), + groupKey: __expectString(output.groupKey), + } as any; +}; + +const deserializeAws_restJson1CountsList = (output: any, context: __SerdeContext): Counts[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Counts(entry, context); + }); +}; + +const deserializeAws_restJson1CoveredResource = (output: any, context: __SerdeContext): CoveredResource => { + return { + accountId: __expectString(output.accountId), + resourceId: __expectString(output.resourceId), + resourceMetadata: + output.resourceMetadata !== undefined && output.resourceMetadata !== null + ? deserializeAws_restJson1ResourceScanMetadata(output.resourceMetadata, context) + : undefined, + resourceType: __expectString(output.resourceType), + scanStatus: + output.scanStatus !== undefined && output.scanStatus !== null + ? deserializeAws_restJson1ScanStatus(output.scanStatus, context) + : undefined, + scanType: __expectString(output.scanType), + } as any; +}; + +const deserializeAws_restJson1CoveredResources = (output: any, context: __SerdeContext): CoveredResource[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1CoveredResource(entry, context); + }); +}; + +const deserializeAws_restJson1CvssScore = (output: any, context: __SerdeContext): CvssScore => { + return { + baseScore: __limitedParseDouble(output.baseScore), + scoringVector: __expectString(output.scoringVector), + source: __expectString(output.source), + version: __expectString(output.version), + } as any; +}; + +const deserializeAws_restJson1CvssScoreAdjustment = (output: any, context: __SerdeContext): CvssScoreAdjustment => { + return { + metric: __expectString(output.metric), + reason: __expectString(output.reason), + } as any; +}; + +const deserializeAws_restJson1CvssScoreAdjustmentList = ( + output: any, + context: __SerdeContext +): CvssScoreAdjustment[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1CvssScoreAdjustment(entry, context); + }); +}; + +const deserializeAws_restJson1CvssScoreDetails = (output: any, context: __SerdeContext): CvssScoreDetails => { + return { + adjustments: + output.adjustments !== undefined && output.adjustments !== null + ? 
deserializeAws_restJson1CvssScoreAdjustmentList(output.adjustments, context) + : undefined, + cvssSource: __expectString(output.cvssSource), + score: __limitedParseDouble(output.score), + scoreSource: __expectString(output.scoreSource), + scoringVector: __expectString(output.scoringVector), + version: __expectString(output.version), + } as any; +}; + +const deserializeAws_restJson1CvssScoreList = (output: any, context: __SerdeContext): CvssScore[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1CvssScore(entry, context); + }); +}; + +const deserializeAws_restJson1DateFilter = (output: any, context: __SerdeContext): DateFilter => { + return { + endInclusive: + output.endInclusive !== undefined && output.endInclusive !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.endInclusive))) + : undefined, + startInclusive: + output.startInclusive !== undefined && output.startInclusive !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.startInclusive))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1DateFilterList = (output: any, context: __SerdeContext): DateFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1DateFilter(entry, context); + }); +}; + +const deserializeAws_restJson1DelegatedAdmin = (output: any, context: __SerdeContext): DelegatedAdmin => { + return { + accountId: __expectString(output.accountId), + relationshipStatus: __expectString(output.relationshipStatus), + } as any; +}; + +const deserializeAws_restJson1DelegatedAdminAccount = (output: any, context: __SerdeContext): DelegatedAdminAccount => { + return { + accountId: __expectString(output.accountId), + status: __expectString(output.status), + } as any; +}; + +const deserializeAws_restJson1DelegatedAdminAccountList = ( + output: any, + context: __SerdeContext +): DelegatedAdminAccount[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1DelegatedAdminAccount(entry, context); + }); +}; + +const deserializeAws_restJson1Destination = (output: any, context: __SerdeContext): Destination => { + return { + bucketName: __expectString(output.bucketName), + keyPrefix: __expectString(output.keyPrefix), + kmsKeyArn: __expectString(output.kmsKeyArn), + } as any; +}; + +const deserializeAws_restJson1Ec2InstanceAggregationResponse = ( + output: any, + context: __SerdeContext +): Ec2InstanceAggregationResponse => { + return { + accountId: __expectString(output.accountId), + ami: __expectString(output.ami), + instanceId: __expectString(output.instanceId), + instanceTags: + output.instanceTags !== undefined && output.instanceTags !== null + ? deserializeAws_restJson1TagMap(output.instanceTags, context) + : undefined, + networkFindings: __expectLong(output.networkFindings), + operatingSystem: __expectString(output.operatingSystem), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? 
deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1Ec2Metadata = (output: any, context: __SerdeContext): Ec2Metadata => { + return { + amiId: __expectString(output.amiId), + platform: __expectString(output.platform), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1EcrContainerImageMetadata = ( + output: any, + context: __SerdeContext +): EcrContainerImageMetadata => { + return { + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagList(output.tags, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1EcrRepositoryMetadata = (output: any, context: __SerdeContext): EcrRepositoryMetadata => { + return { + name: __expectString(output.name), + scanFrequency: __expectString(output.scanFrequency), + } as any; +}; + +const deserializeAws_restJson1FailedAccount = (output: any, context: __SerdeContext): FailedAccount => { + return { + accountId: __expectString(output.accountId), + errorCode: __expectString(output.errorCode), + errorMessage: __expectString(output.errorMessage), + resourceStatus: + output.resourceStatus !== undefined && output.resourceStatus !== null + ? deserializeAws_restJson1ResourceStatus(output.resourceStatus, context) + : undefined, + status: __expectString(output.status), + } as any; +}; + +const deserializeAws_restJson1FailedAccountList = (output: any, context: __SerdeContext): FailedAccount[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1FailedAccount(entry, context); + }); +}; + +const deserializeAws_restJson1Filter = (output: any, context: __SerdeContext): Filter => { + return { + action: __expectString(output.action), + arn: __expectString(output.arn), + createdAt: + output.createdAt !== undefined && output.createdAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.createdAt))) + : undefined, + criteria: + output.criteria !== undefined && output.criteria !== null + ? deserializeAws_restJson1FilterCriteria(output.criteria, context) + : undefined, + description: __expectString(output.description), + name: __expectString(output.name), + ownerId: __expectString(output.ownerId), + reason: __expectString(output.reason), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + updatedAt: + output.updatedAt !== undefined && output.updatedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.updatedAt))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1FilterCriteria = (output: any, context: __SerdeContext): FilterCriteria => { + return { + awsAccountId: + output.awsAccountId !== undefined && output.awsAccountId !== null + ? deserializeAws_restJson1StringFilterList(output.awsAccountId, context) + : undefined, + componentId: + output.componentId !== undefined && output.componentId !== null + ? deserializeAws_restJson1StringFilterList(output.componentId, context) + : undefined, + componentType: + output.componentType !== undefined && output.componentType !== null + ? deserializeAws_restJson1StringFilterList(output.componentType, context) + : undefined, + ec2InstanceImageId: + output.ec2InstanceImageId !== undefined && output.ec2InstanceImageId !== null + ? 
deserializeAws_restJson1StringFilterList(output.ec2InstanceImageId, context) + : undefined, + ec2InstanceSubnetId: + output.ec2InstanceSubnetId !== undefined && output.ec2InstanceSubnetId !== null + ? deserializeAws_restJson1StringFilterList(output.ec2InstanceSubnetId, context) + : undefined, + ec2InstanceVpcId: + output.ec2InstanceVpcId !== undefined && output.ec2InstanceVpcId !== null + ? deserializeAws_restJson1StringFilterList(output.ec2InstanceVpcId, context) + : undefined, + ecrImageArchitecture: + output.ecrImageArchitecture !== undefined && output.ecrImageArchitecture !== null + ? deserializeAws_restJson1StringFilterList(output.ecrImageArchitecture, context) + : undefined, + ecrImageHash: + output.ecrImageHash !== undefined && output.ecrImageHash !== null + ? deserializeAws_restJson1StringFilterList(output.ecrImageHash, context) + : undefined, + ecrImagePushedAt: + output.ecrImagePushedAt !== undefined && output.ecrImagePushedAt !== null + ? deserializeAws_restJson1DateFilterList(output.ecrImagePushedAt, context) + : undefined, + ecrImageRegistry: + output.ecrImageRegistry !== undefined && output.ecrImageRegistry !== null + ? deserializeAws_restJson1StringFilterList(output.ecrImageRegistry, context) + : undefined, + ecrImageRepositoryName: + output.ecrImageRepositoryName !== undefined && output.ecrImageRepositoryName !== null + ? deserializeAws_restJson1StringFilterList(output.ecrImageRepositoryName, context) + : undefined, + ecrImageTags: + output.ecrImageTags !== undefined && output.ecrImageTags !== null + ? deserializeAws_restJson1StringFilterList(output.ecrImageTags, context) + : undefined, + findingArn: + output.findingArn !== undefined && output.findingArn !== null + ? deserializeAws_restJson1StringFilterList(output.findingArn, context) + : undefined, + findingStatus: + output.findingStatus !== undefined && output.findingStatus !== null + ? deserializeAws_restJson1StringFilterList(output.findingStatus, context) + : undefined, + findingType: + output.findingType !== undefined && output.findingType !== null + ? deserializeAws_restJson1StringFilterList(output.findingType, context) + : undefined, + firstObservedAt: + output.firstObservedAt !== undefined && output.firstObservedAt !== null + ? deserializeAws_restJson1DateFilterList(output.firstObservedAt, context) + : undefined, + inspectorScore: + output.inspectorScore !== undefined && output.inspectorScore !== null + ? deserializeAws_restJson1NumberFilterList(output.inspectorScore, context) + : undefined, + lastObservedAt: + output.lastObservedAt !== undefined && output.lastObservedAt !== null + ? deserializeAws_restJson1DateFilterList(output.lastObservedAt, context) + : undefined, + networkProtocol: + output.networkProtocol !== undefined && output.networkProtocol !== null + ? deserializeAws_restJson1StringFilterList(output.networkProtocol, context) + : undefined, + portRange: + output.portRange !== undefined && output.portRange !== null + ? deserializeAws_restJson1PortRangeFilterList(output.portRange, context) + : undefined, + relatedVulnerabilities: + output.relatedVulnerabilities !== undefined && output.relatedVulnerabilities !== null + ? deserializeAws_restJson1StringFilterList(output.relatedVulnerabilities, context) + : undefined, + resourceId: + output.resourceId !== undefined && output.resourceId !== null + ? deserializeAws_restJson1StringFilterList(output.resourceId, context) + : undefined, + resourceTags: + output.resourceTags !== undefined && output.resourceTags !== null + ? 
deserializeAws_restJson1MapFilterList(output.resourceTags, context) + : undefined, + resourceType: + output.resourceType !== undefined && output.resourceType !== null + ? deserializeAws_restJson1StringFilterList(output.resourceType, context) + : undefined, + severity: + output.severity !== undefined && output.severity !== null + ? deserializeAws_restJson1StringFilterList(output.severity, context) + : undefined, + title: + output.title !== undefined && output.title !== null + ? deserializeAws_restJson1StringFilterList(output.title, context) + : undefined, + updatedAt: + output.updatedAt !== undefined && output.updatedAt !== null + ? deserializeAws_restJson1DateFilterList(output.updatedAt, context) + : undefined, + vendorSeverity: + output.vendorSeverity !== undefined && output.vendorSeverity !== null + ? deserializeAws_restJson1StringFilterList(output.vendorSeverity, context) + : undefined, + vulnerabilityId: + output.vulnerabilityId !== undefined && output.vulnerabilityId !== null + ? deserializeAws_restJson1StringFilterList(output.vulnerabilityId, context) + : undefined, + vulnerabilitySource: + output.vulnerabilitySource !== undefined && output.vulnerabilitySource !== null + ? deserializeAws_restJson1StringFilterList(output.vulnerabilitySource, context) + : undefined, + vulnerablePackages: + output.vulnerablePackages !== undefined && output.vulnerablePackages !== null + ? deserializeAws_restJson1PackageFilterList(output.vulnerablePackages, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1FilterList = (output: any, context: __SerdeContext): Filter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Filter(entry, context); + }); +}; + +const deserializeAws_restJson1Finding = (output: any, context: __SerdeContext): Finding => { + return { + awsAccountId: __expectString(output.awsAccountId), + description: __expectString(output.description), + findingArn: __expectString(output.findingArn), + firstObservedAt: + output.firstObservedAt !== undefined && output.firstObservedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.firstObservedAt))) + : undefined, + inspectorScore: __limitedParseDouble(output.inspectorScore), + inspectorScoreDetails: + output.inspectorScoreDetails !== undefined && output.inspectorScoreDetails !== null + ? deserializeAws_restJson1InspectorScoreDetails(output.inspectorScoreDetails, context) + : undefined, + lastObservedAt: + output.lastObservedAt !== undefined && output.lastObservedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.lastObservedAt))) + : undefined, + networkReachabilityDetails: + output.networkReachabilityDetails !== undefined && output.networkReachabilityDetails !== null + ? deserializeAws_restJson1NetworkReachabilityDetails(output.networkReachabilityDetails, context) + : undefined, + packageVulnerabilityDetails: + output.packageVulnerabilityDetails !== undefined && output.packageVulnerabilityDetails !== null + ? deserializeAws_restJson1PackageVulnerabilityDetails(output.packageVulnerabilityDetails, context) + : undefined, + remediation: + output.remediation !== undefined && output.remediation !== null + ? deserializeAws_restJson1Remediation(output.remediation, context) + : undefined, + resources: + output.resources !== undefined && output.resources !== null + ? 
deserializeAws_restJson1ResourceList(output.resources, context) + : undefined, + severity: __expectString(output.severity), + status: __expectString(output.status), + title: __expectString(output.title), + type: __expectString(output.type), + updatedAt: + output.updatedAt !== undefined && output.updatedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.updatedAt))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1FindingList = (output: any, context: __SerdeContext): Finding[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Finding(entry, context); + }); +}; + +const deserializeAws_restJson1FindingTypeAggregationResponse = ( + output: any, + context: __SerdeContext +): FindingTypeAggregationResponse => { + return { + accountId: __expectString(output.accountId), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1FreeTrialAccountInfo = (output: any, context: __SerdeContext): FreeTrialAccountInfo => { + return { + accountId: __expectString(output.accountId), + freeTrialInfo: + output.freeTrialInfo !== undefined && output.freeTrialInfo !== null + ? deserializeAws_restJson1FreeTrialInfoList(output.freeTrialInfo, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1FreeTrialAccountInfoList = ( + output: any, + context: __SerdeContext +): FreeTrialAccountInfo[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1FreeTrialAccountInfo(entry, context); + }); +}; + +const deserializeAws_restJson1FreeTrialInfo = (output: any, context: __SerdeContext): FreeTrialInfo => { + return { + end: + output.end !== undefined && output.end !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.end))) + : undefined, + start: + output.start !== undefined && output.start !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.start))) + : undefined, + status: __expectString(output.status), + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_restJson1FreeTrialInfoError = (output: any, context: __SerdeContext): FreeTrialInfoError => { + return { + accountId: __expectString(output.accountId), + code: __expectString(output.code), + message: __expectString(output.message), + } as any; +}; + +const deserializeAws_restJson1FreeTrialInfoErrorList = (output: any, context: __SerdeContext): FreeTrialInfoError[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1FreeTrialInfoError(entry, context); + }); +}; + +const deserializeAws_restJson1FreeTrialInfoList = (output: any, context: __SerdeContext): FreeTrialInfo[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1FreeTrialInfo(entry, context); + }); +}; + +const deserializeAws_restJson1ImageLayerAggregationResponse = ( + output: any, + context: __SerdeContext +): ImageLayerAggregationResponse => { + return { + accountId: __expectString(output.accountId), + layerHash: __expectString(output.layerHash), + repository: __expectString(output.repository), + resourceId: __expectString(output.resourceId), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ImageTagList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1InspectorScoreDetails = (output: any, context: __SerdeContext): InspectorScoreDetails => { + return { + adjustedCvss: + output.adjustedCvss !== undefined && output.adjustedCvss !== null + ? 
deserializeAws_restJson1CvssScoreDetails(output.adjustedCvss, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1IpV4AddressList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1IpV6AddressList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1MapFilter = (output: any, context: __SerdeContext): MapFilter => { + return { + comparison: __expectString(output.comparison), + key: __expectString(output.key), + value: __expectString(output.value), + } as any; +}; + +const deserializeAws_restJson1MapFilterList = (output: any, context: __SerdeContext): MapFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1MapFilter(entry, context); + }); +}; + +const deserializeAws_restJson1Member = (output: any, context: __SerdeContext): Member => { + return { + accountId: __expectString(output.accountId), + delegatedAdminAccountId: __expectString(output.delegatedAdminAccountId), + relationshipStatus: __expectString(output.relationshipStatus), + updatedAt: + output.updatedAt !== undefined && output.updatedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.updatedAt))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1MemberList = (output: any, context: __SerdeContext): Member[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Member(entry, context); + }); +}; + +const deserializeAws_restJson1NetworkPath = (output: any, context: __SerdeContext): NetworkPath => { + return { + steps: + output.steps !== undefined && output.steps !== null + ? deserializeAws_restJson1StepList(output.steps, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1NetworkReachabilityDetails = ( + output: any, + context: __SerdeContext +): NetworkReachabilityDetails => { + return { + networkPath: + output.networkPath !== undefined && output.networkPath !== null + ? deserializeAws_restJson1NetworkPath(output.networkPath, context) + : undefined, + openPortRange: + output.openPortRange !== undefined && output.openPortRange !== null + ? 
deserializeAws_restJson1PortRange(output.openPortRange, context) + : undefined, + protocol: __expectString(output.protocol), + } as any; +}; + +const deserializeAws_restJson1NonEmptyStringList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1NumberFilter = (output: any, context: __SerdeContext): NumberFilter => { + return { + lowerInclusive: __limitedParseDouble(output.lowerInclusive), + upperInclusive: __limitedParseDouble(output.upperInclusive), + } as any; +}; + +const deserializeAws_restJson1NumberFilterList = (output: any, context: __SerdeContext): NumberFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1NumberFilter(entry, context); + }); +}; + +const deserializeAws_restJson1PackageAggregationResponse = ( + output: any, + context: __SerdeContext +): PackageAggregationResponse => { + return { + accountId: __expectString(output.accountId), + packageName: __expectString(output.packageName), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PackageFilter = (output: any, context: __SerdeContext): PackageFilter => { + return { + architecture: + output.architecture !== undefined && output.architecture !== null + ? deserializeAws_restJson1StringFilter(output.architecture, context) + : undefined, + epoch: + output.epoch !== undefined && output.epoch !== null + ? deserializeAws_restJson1NumberFilter(output.epoch, context) + : undefined, + name: + output.name !== undefined && output.name !== null + ? deserializeAws_restJson1StringFilter(output.name, context) + : undefined, + release: + output.release !== undefined && output.release !== null + ? deserializeAws_restJson1StringFilter(output.release, context) + : undefined, + sourceLayerHash: + output.sourceLayerHash !== undefined && output.sourceLayerHash !== null + ? deserializeAws_restJson1StringFilter(output.sourceLayerHash, context) + : undefined, + version: + output.version !== undefined && output.version !== null + ? deserializeAws_restJson1StringFilter(output.version, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PackageFilterList = (output: any, context: __SerdeContext): PackageFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PackageFilter(entry, context); + }); +}; + +const deserializeAws_restJson1PackageVulnerabilityDetails = ( + output: any, + context: __SerdeContext +): PackageVulnerabilityDetails => { + return { + cvss: + output.cvss !== undefined && output.cvss !== null + ? deserializeAws_restJson1CvssScoreList(output.cvss, context) + : undefined, + referenceUrls: + output.referenceUrls !== undefined && output.referenceUrls !== null + ? deserializeAws_restJson1NonEmptyStringList(output.referenceUrls, context) + : undefined, + relatedVulnerabilities: + output.relatedVulnerabilities !== undefined && output.relatedVulnerabilities !== null + ? 
deserializeAws_restJson1VulnerabilityIdList(output.relatedVulnerabilities, context) + : undefined, + source: __expectString(output.source), + sourceUrl: __expectString(output.sourceUrl), + vendorCreatedAt: + output.vendorCreatedAt !== undefined && output.vendorCreatedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.vendorCreatedAt))) + : undefined, + vendorSeverity: __expectString(output.vendorSeverity), + vendorUpdatedAt: + output.vendorUpdatedAt !== undefined && output.vendorUpdatedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.vendorUpdatedAt))) + : undefined, + vulnerabilityId: __expectString(output.vulnerabilityId), + vulnerablePackages: + output.vulnerablePackages !== undefined && output.vulnerablePackages !== null + ? deserializeAws_restJson1VulnerablePackageList(output.vulnerablePackages, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1Permission = (output: any, context: __SerdeContext): Permission => { + return { + operation: __expectString(output.operation), + service: __expectString(output.service), + } as any; +}; + +const deserializeAws_restJson1Permissions = (output: any, context: __SerdeContext): Permission[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Permission(entry, context); + }); +}; + +const deserializeAws_restJson1PortRange = (output: any, context: __SerdeContext): PortRange => { + return { + begin: __expectInt32(output.begin), + end: __expectInt32(output.end), + } as any; +}; + +const deserializeAws_restJson1PortRangeFilter = (output: any, context: __SerdeContext): PortRangeFilter => { + return { + beginInclusive: __expectInt32(output.beginInclusive), + endInclusive: __expectInt32(output.endInclusive), + } as any; +}; + +const deserializeAws_restJson1PortRangeFilterList = (output: any, context: __SerdeContext): PortRangeFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PortRangeFilter(entry, context); + }); +}; + +const deserializeAws_restJson1Recommendation = (output: any, context: __SerdeContext): Recommendation => { + return { + Url: __expectString(output.Url), + text: __expectString(output.text), + } as any; +}; + +const deserializeAws_restJson1Remediation = (output: any, context: __SerdeContext): Remediation => { + return { + recommendation: + output.recommendation !== undefined && output.recommendation !== null + ? deserializeAws_restJson1Recommendation(output.recommendation, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1RepositoryAggregationResponse = ( + output: any, + context: __SerdeContext +): RepositoryAggregationResponse => { + return { + accountId: __expectString(output.accountId), + affectedImages: __expectLong(output.affectedImages), + repository: __expectString(output.repository), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1Resource = (output: any, context: __SerdeContext): Resource => { + return { + details: + output.details !== undefined && output.details !== null + ? 
deserializeAws_restJson1ResourceDetails(output.details, context) + : undefined, + id: __expectString(output.id), + partition: __expectString(output.partition), + region: __expectString(output.region), + tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1TagMap(output.tags, context) + : undefined, + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_restJson1ResourceDetails = (output: any, context: __SerdeContext): ResourceDetails => { + return { + awsEc2Instance: + output.awsEc2Instance !== undefined && output.awsEc2Instance !== null + ? deserializeAws_restJson1AwsEc2InstanceDetails(output.awsEc2Instance, context) + : undefined, + awsEcrContainerImage: + output.awsEcrContainerImage !== undefined && output.awsEcrContainerImage !== null + ? deserializeAws_restJson1AwsEcrContainerImageDetails(output.awsEcrContainerImage, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ResourceList = (output: any, context: __SerdeContext): Resource[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Resource(entry, context); + }); +}; + +const deserializeAws_restJson1ResourceScanMetadata = (output: any, context: __SerdeContext): ResourceScanMetadata => { + return { + ec2: + output.ec2 !== undefined && output.ec2 !== null + ? deserializeAws_restJson1Ec2Metadata(output.ec2, context) + : undefined, + ecrImage: + output.ecrImage !== undefined && output.ecrImage !== null + ? deserializeAws_restJson1EcrContainerImageMetadata(output.ecrImage, context) + : undefined, + ecrRepository: + output.ecrRepository !== undefined && output.ecrRepository !== null + ? deserializeAws_restJson1EcrRepositoryMetadata(output.ecrRepository, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ResourceState = (output: any, context: __SerdeContext): ResourceState => { + return { + ec2: + output.ec2 !== undefined && output.ec2 !== null ? deserializeAws_restJson1State(output.ec2, context) : undefined, + ecr: + output.ecr !== undefined && output.ecr !== null ? 
deserializeAws_restJson1State(output.ecr, context) : undefined, + } as any; +}; + +const deserializeAws_restJson1ResourceStatus = (output: any, context: __SerdeContext): ResourceStatus => { + return { + ec2: __expectString(output.ec2), + ecr: __expectString(output.ecr), + } as any; +}; + +const deserializeAws_restJson1ScanStatus = (output: any, context: __SerdeContext): ScanStatus => { + return { + reason: __expectString(output.reason), + statusCode: __expectString(output.statusCode), + } as any; +}; + +const deserializeAws_restJson1SeverityCounts = (output: any, context: __SerdeContext): SeverityCounts => { + return { + all: __expectLong(output.all), + critical: __expectLong(output.critical), + high: __expectLong(output.high), + medium: __expectLong(output.medium), + } as any; +}; + +const deserializeAws_restJson1State = (output: any, context: __SerdeContext): State => { + return { + errorCode: __expectString(output.errorCode), + errorMessage: __expectString(output.errorMessage), + status: __expectString(output.status), + } as any; +}; + +const deserializeAws_restJson1Step = (output: any, context: __SerdeContext): Step => { + return { + componentId: __expectString(output.componentId), + componentType: __expectString(output.componentType), + } as any; +}; + +const deserializeAws_restJson1StepList = (output: any, context: __SerdeContext): Step[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Step(entry, context); + }); +}; + +const deserializeAws_restJson1StringFilter = (output: any, context: __SerdeContext): StringFilter => { + return { + comparison: __expectString(output.comparison), + value: __expectString(output.value), + } as any; +}; + +const deserializeAws_restJson1StringFilterList = (output: any, context: __SerdeContext): StringFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1StringFilter(entry, context); + }); +}; + +const deserializeAws_restJson1StringList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1TagList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1TagMap = (output: any, context: __SerdeContext): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1TitleAggregationResponse = ( + output: any, + context: __SerdeContext +): TitleAggregationResponse => { + return { + accountId: __expectString(output.accountId), + severityCounts: + output.severityCounts !== undefined && output.severityCounts !== null + ? 
deserializeAws_restJson1SeverityCounts(output.severityCounts, context) + : undefined, + title: __expectString(output.title), + vulnerabilityId: __expectString(output.vulnerabilityId), + } as any; +}; + +const deserializeAws_restJson1Usage = (output: any, context: __SerdeContext): Usage => { + return { + currency: __expectString(output.currency), + estimatedMonthlyCost: __limitedParseDouble(output.estimatedMonthlyCost), + total: __limitedParseDouble(output.total), + type: __expectString(output.type), + } as any; +}; + +const deserializeAws_restJson1UsageList = (output: any, context: __SerdeContext): Usage[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Usage(entry, context); + }); +}; + +const deserializeAws_restJson1UsageTotal = (output: any, context: __SerdeContext): UsageTotal => { + return { + accountId: __expectString(output.accountId), + usage: + output.usage !== undefined && output.usage !== null + ? deserializeAws_restJson1UsageList(output.usage, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1UsageTotalList = (output: any, context: __SerdeContext): UsageTotal[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1UsageTotal(entry, context); + }); +}; + +const deserializeAws_restJson1ValidationExceptionField = ( + output: any, + context: __SerdeContext +): ValidationExceptionField => { + return { + message: __expectString(output.message), + name: __expectString(output.name), + } as any; +}; + +const deserializeAws_restJson1ValidationExceptionFields = ( + output: any, + context: __SerdeContext +): ValidationExceptionField[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ValidationExceptionField(entry, context); + }); +}; + +const deserializeAws_restJson1VulnerabilityIdList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1VulnerablePackage = (output: any, context: __SerdeContext): VulnerablePackage => { + return { + arch: __expectString(output.arch), + epoch: __expectInt32(output.epoch), + filePath: __expectString(output.filePath), + fixedInVersion: __expectString(output.fixedInVersion), + name: __expectString(output.name), + packageManager: __expectString(output.packageManager), + release: __expectString(output.release), + sourceLayerHash: __expectString(output.sourceLayerHash), + version: __expectString(output.version), + } as any; +}; + +const deserializeAws_restJson1VulnerablePackageList = (output: any, context: __SerdeContext): VulnerablePackage[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1VulnerablePackage(entry, context); + }); +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? 
output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. +const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. +const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const isSerializableHeaderValue = (value: any): boolean => + value !== undefined && + value !== null && + value !== "" && + (!Object.getOwnPropertyNames(value).includes("length") || value.length != 0) && + (!Object.getOwnPropertyNames(value).includes("size") || value.size != 0); + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. + */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-inspector2/src/runtimeConfig.browser.ts b/clients/client-inspector2/src/runtimeConfig.browser.ts new file mode 100644 index 000000000000..cbe1dc3826e7 --- /dev/null +++ b/clients/client-inspector2/src/runtimeConfig.browser.ts @@ -0,0 +1,44 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { Sha256 } from "@aws-crypto/sha256-browser"; +import { DEFAULT_USE_DUALSTACK_ENDPOINT, DEFAULT_USE_FIPS_ENDPOINT } from "@aws-sdk/config-resolver"; +import { FetchHttpHandler, streamCollector } from "@aws-sdk/fetch-http-handler"; +import { invalidProvider } from "@aws-sdk/invalid-dependency"; +import { DEFAULT_MAX_ATTEMPTS, DEFAULT_RETRY_MODE } from "@aws-sdk/middleware-retry"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-browser"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-browser"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-browser"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-browser"; +import { Inspector2ClientConfig } from "./Inspector2Client"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: Inspector2ClientConfig) => { + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "browser", + base64Decoder: config?.base64Decoder ?? 
fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? ((_: unknown) => () => Promise.reject(new Error("Credential is missing"))), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? DEFAULT_MAX_ATTEMPTS, + region: config?.region ?? invalidProvider("Region is missing"), + requestHandler: config?.requestHandler ?? new FetchHttpHandler(), + retryMode: config?.retryMode ?? (() => Promise.resolve(DEFAULT_RETRY_MODE)), + sha256: config?.sha256 ?? Sha256, + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? (() => Promise.resolve(DEFAULT_USE_DUALSTACK_ENDPOINT)), + useFipsEndpoint: config?.useFipsEndpoint ?? (() => Promise.resolve(DEFAULT_USE_FIPS_ENDPOINT)), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? toUtf8, + }; +}; diff --git a/clients/client-inspector2/src/runtimeConfig.native.ts b/clients/client-inspector2/src/runtimeConfig.native.ts new file mode 100644 index 000000000000..4884a1b9239c --- /dev/null +++ b/clients/client-inspector2/src/runtimeConfig.native.ts @@ -0,0 +1,17 @@ +import { Sha256 } from "@aws-crypto/sha256-js"; + +import { Inspector2ClientConfig } from "./Inspector2Client"; +import { getRuntimeConfig as getBrowserRuntimeConfig } from "./runtimeConfig.browser"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: Inspector2ClientConfig) => { + const browserDefaults = getBrowserRuntimeConfig(config); + return { + ...browserDefaults, + ...config, + runtime: "react-native", + sha256: config?.sha256 ?? Sha256, + }; +}; diff --git a/clients/client-inspector2/src/runtimeConfig.shared.ts b/clients/client-inspector2/src/runtimeConfig.shared.ts new file mode 100644 index 000000000000..69f21bbe3af9 --- /dev/null +++ b/clients/client-inspector2/src/runtimeConfig.shared.ts @@ -0,0 +1,17 @@ +import { Logger as __Logger } from "@aws-sdk/types"; +import { parseUrl } from "@aws-sdk/url-parser"; + +import { defaultRegionInfoProvider } from "./endpoints"; +import { Inspector2ClientConfig } from "./Inspector2Client"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: Inspector2ClientConfig) => ({ + apiVersion: "2020-06-08", + disableHostPrefix: config?.disableHostPrefix ?? false, + logger: config?.logger ?? ({} as __Logger), + regionInfoProvider: config?.regionInfoProvider ?? defaultRegionInfoProvider, + serviceId: config?.serviceId ?? "Inspector2", + urlParser: config?.urlParser ?? 
parseUrl, +}); diff --git a/clients/client-inspector2/src/runtimeConfig.ts b/clients/client-inspector2/src/runtimeConfig.ts new file mode 100644 index 000000000000..ae0659ae37fd --- /dev/null +++ b/clients/client-inspector2/src/runtimeConfig.ts @@ -0,0 +1,53 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { decorateDefaultCredentialProvider } from "@aws-sdk/client-sts"; +import { + NODE_REGION_CONFIG_FILE_OPTIONS, + NODE_REGION_CONFIG_OPTIONS, + NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS, + NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS, +} from "@aws-sdk/config-resolver"; +import { defaultProvider as credentialDefaultProvider } from "@aws-sdk/credential-provider-node"; +import { Hash } from "@aws-sdk/hash-node"; +import { NODE_MAX_ATTEMPT_CONFIG_OPTIONS, NODE_RETRY_MODE_CONFIG_OPTIONS } from "@aws-sdk/middleware-retry"; +import { loadConfig as loadNodeConfig } from "@aws-sdk/node-config-provider"; +import { NodeHttpHandler, streamCollector } from "@aws-sdk/node-http-handler"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-node"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-node"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-node"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-node"; +import { Inspector2ClientConfig } from "./Inspector2Client"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { emitWarningIfUnsupportedVersion } from "@aws-sdk/smithy-client"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: Inspector2ClientConfig) => { + emitWarningIfUnsupportedVersion(process.version); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "node", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? decorateDefaultCredentialProvider(credentialDefaultProvider), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? loadNodeConfig(NODE_MAX_ATTEMPT_CONFIG_OPTIONS), + region: config?.region ?? loadNodeConfig(NODE_REGION_CONFIG_OPTIONS, NODE_REGION_CONFIG_FILE_OPTIONS), + requestHandler: config?.requestHandler ?? new NodeHttpHandler(), + retryMode: config?.retryMode ?? loadNodeConfig(NODE_RETRY_MODE_CONFIG_OPTIONS), + sha256: config?.sha256 ?? Hash.bind(null, "sha256"), + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? loadNodeConfig(NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS), + useFipsEndpoint: config?.useFipsEndpoint ?? loadNodeConfig(NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
toUtf8, + }; +}; diff --git a/clients/client-inspector2/tsconfig.es.json b/clients/client-inspector2/tsconfig.es.json new file mode 100644 index 000000000000..4c72364cd1a0 --- /dev/null +++ b/clients/client-inspector2/tsconfig.es.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "es5", + "module": "esnext", + "moduleResolution": "node", + "lib": ["dom", "es5", "es2015.promise", "es2015.collection", "es2015.iterable", "es2015.symbol.wellknown"], + "outDir": "dist-es" + } +} diff --git a/clients/client-inspector2/tsconfig.json b/clients/client-inspector2/tsconfig.json new file mode 100644 index 000000000000..093039289c53 --- /dev/null +++ b/clients/client-inspector2/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "rootDir": "./src", + "alwaysStrict": true, + "target": "ES2018", + "module": "commonjs", + "strict": true, + "downlevelIteration": true, + "importHelpers": true, + "noEmitHelpers": true, + "incremental": true, + "resolveJsonModule": true, + "esModuleInterop": true, + "outDir": "dist-cjs", + "removeComments": true + }, + "typedocOptions": { + "exclude": ["**/node_modules/**", "**/*.spec.ts", "**/protocols/*.ts", "**/e2e/*.ts", "**/endpoints.ts"], + "excludeNotExported": true, + "excludePrivate": true, + "hideGenerator": true, + "ignoreCompilerErrors": true, + "includeDeclarations": true, + "stripInternal": true, + "readme": "README.md", + "mode": "file", + "out": "docs", + "theme": "minimal", + "plugin": ["@aws-sdk/service-client-documentation-generator"] + }, + "exclude": ["test/**/*"] +} diff --git a/clients/client-inspector2/tsconfig.types.json b/clients/client-inspector2/tsconfig.types.json new file mode 100644 index 000000000000..4c3dfa7b3d25 --- /dev/null +++ b/clients/client-inspector2/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "removeComments": false, + "declaration": true, + "declarationDir": "dist-types", + "emitDeclarationOnly": true + }, + "exclude": ["test/**/*", "dist-types/**/*"] +} diff --git a/clients/client-iot/src/commands/ListThingRegistrationTasksCommand.ts b/clients/client-iot/src/commands/ListThingRegistrationTasksCommand.ts index c50694900586..6afba4c209c4 100644 --- a/clients/client-iot/src/commands/ListThingRegistrationTasksCommand.ts +++ b/clients/client-iot/src/commands/ListThingRegistrationTasksCommand.ts @@ -12,7 +12,8 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { ListThingRegistrationTasksRequest, ListThingRegistrationTasksResponse } from "../models/models_1"; +import { ListThingRegistrationTasksRequest } from "../models/models_1"; +import { ListThingRegistrationTasksResponse } from "../models/models_2"; import { deserializeAws_restJson1ListThingRegistrationTasksCommand, serializeAws_restJson1ListThingRegistrationTasksCommand, diff --git a/clients/client-iot/src/commands/ListThingsCommand.ts b/clients/client-iot/src/commands/ListThingsCommand.ts index a89d0ad0826a..2d2ccfc262e1 100644 --- a/clients/client-iot/src/commands/ListThingsCommand.ts +++ b/clients/client-iot/src/commands/ListThingsCommand.ts @@ -12,8 +12,7 @@ import { } from "@aws-sdk/types"; import { IoTClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTClient"; -import { ListThingsRequest } from "../models/models_1"; -import { ListThingsResponse } from "../models/models_2"; +import { ListThingsRequest, ListThingsResponse } from "../models/models_2"; import { 
deserializeAws_restJson1ListThingsCommand, serializeAws_restJson1ListThingsCommand, diff --git a/clients/client-iot/src/models/models_1.ts b/clients/client-iot/src/models/models_1.ts index fbb47641de94..4d02e978bba5 100644 --- a/clients/client-iot/src/models/models_1.ts +++ b/clients/client-iot/src/models/models_1.ts @@ -1897,6 +1897,10 @@ export interface DescribeIndexResponse { *

 *             REGISTRY_AND_SHADOW_AND_CONNECTIVITY_STATUS - Your thing index contains registry
 *             data, shadow data, and thing connectivity status data.
+ *             MULTI_INDEXING_MODE - Your thing index contains multiple data sources. For more information, see
+ *             GetIndexingConfiguration.
 *
                                */ schema?: string; @@ -4053,6 +4057,16 @@ export namespace ThingGroupIndexingConfiguration { }); } +export enum DeviceDefenderIndexingMode { + OFF = "OFF", + VIOLATIONS = "VIOLATIONS", +} + +export enum NamedShadowIndexingMode { + OFF = "OFF", + ON = "ON", +} + export enum ThingConnectivityIndexingMode { OFF = "OFF", STATUS = "STATUS", @@ -4100,6 +4114,40 @@ export interface ThingIndexingConfiguration { */ thingConnectivityIndexingMode?: ThingConnectivityIndexingMode | string; + /** + *

+ * Device Defender indexing mode. Valid values are:
+ *     VIOLATIONS – Your thing index contains Device Defender violations. To enable Device
+ *     Defender indexing, deviceDefenderIndexingMode must not be set to OFF.
+ *     OFF - Device Defender indexing is disabled.
+ * For more information about Device Defender violations, see Device Defender Detect.
+ */
+ deviceDefenderIndexingMode?: DeviceDefenderIndexingMode | string;
+
+ /**
+ * Named shadow indexing mode. Valid values are:
+ *     ON – Your thing index contains named shadow. To enable thing
+ *     named shadow indexing, namedShadowIndexingMode must not be set to OFF.
+ *     OFF - Named shadow indexing is disabled.
+ * For more information about Shadows, see IoT Device Shadow service.
+ */
+ namedShadowIndexingMode?: NamedShadowIndexingMode | string;
+
  /**
   * Contains fields that are indexed and whose types are already known by the Fleet Indexing
   *     service.
                                @@ -8263,74 +8311,3 @@ export namespace ListThingRegistrationTasksRequest { ...obj, }); } - -export interface ListThingRegistrationTasksResponse { - /** - *

- * A list of bulk thing provisioning task IDs.
- */
- taskIds?: string[];
-
- /**
- * The token to use to get the next set of results, or null if there are no additional results.
- */
- nextToken?: string;
-}
-
-export namespace ListThingRegistrationTasksResponse {
-  /**
-   * @internal
-   */
-  export const filterSensitiveLog = (obj: ListThingRegistrationTasksResponse): any => ({
-    ...obj,
-  });
-}
-
-/**
- * The input for the ListThings operation.
- */
-export interface ListThingsRequest {
-  /**
-   * To retrieve the next set of results, the nextToken
-   * value from a previous response; otherwise null to receive
-   * the first set of results.
-   */
-  nextToken?: string;
-
-  /**
-   * The maximum number of results to return in this operation.
-   */
-  maxResults?: number;
-
-  /**
-   * The attribute name used to search for things.
-   */
-  attributeName?: string;
-
-  /**
-   * The attribute value used to search for things.
-   */
-  attributeValue?: string;
-
-  /**
-   * The name of the thing type used to search for things.
-   */
-  thingTypeName?: string;
-
-  /**
-   * When true, the action returns the thing resources with attribute values
-   * that start with the attributeValue provided.
-   * When false, or not present, the action returns only the thing
-   * resources with attribute values that match the entire attributeValue
-   * provided.
                                - */ - usePrefixAttributeValue?: boolean; -} - -export namespace ListThingsRequest { - /** - * @internal - */ - export const filterSensitiveLog = (obj: ListThingsRequest): any => ({ - ...obj, - }); -} diff --git a/clients/client-iot/src/models/models_2.ts b/clients/client-iot/src/models/models_2.ts index 09d6b0083fe1..dece250b5615 100644 --- a/clients/client-iot/src/models/models_2.ts +++ b/clients/client-iot/src/models/models_2.ts @@ -53,6 +53,77 @@ import { ViolationEventOccurrenceRange, } from "./models_1"; +export interface ListThingRegistrationTasksResponse { + /** + *

+ * A list of bulk thing provisioning task IDs.
+ */
+ taskIds?: string[];
+
+ /**
+ * The token to use to get the next set of results, or null if there are no additional results.
+ */
+ nextToken?: string;
+}
+
+export namespace ListThingRegistrationTasksResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: ListThingRegistrationTasksResponse): any => ({
+    ...obj,
+  });
+}
+
+/**
+ * The input for the ListThings operation.
+ */
+export interface ListThingsRequest {
+  /**
+   * To retrieve the next set of results, the nextToken
+   * value from a previous response; otherwise null to receive
+   * the first set of results.
+   */
+  nextToken?: string;
+
+  /**
+   * The maximum number of results to return in this operation.
+   */
+  maxResults?: number;
+
+  /**
+   * The attribute name used to search for things.
+   */
+  attributeName?: string;
+
+  /**
+   * The attribute value used to search for things.
+   */
+  attributeValue?: string;
+
+  /**
+   * The name of the thing type used to search for things.
+   */
+  thingTypeName?: string;
+
+  /**
+   * When true, the action returns the thing resources with attribute values
+   * that start with the attributeValue provided.
+   * When false, or not present, the action returns only the thing
+   * resources with attribute values that match the entire attributeValue
+   * provided.
                                + */ + usePrefixAttributeValue?: boolean; +} + +export namespace ListThingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListThingsRequest): any => ({ + ...obj, + }); +} + /** *

                                The properties of the thing, including thing name, thing type name, and a list of thing * attributes.

                                @@ -1435,10 +1506,18 @@ export interface ThingDocument { attributes?: { [key: string]: string }; /** - *

                                The shadow.

                                + *

                                The unnamed shadow and named shadow.

                                + *

                                For more information about shadows, see IoT Device Shadow service. + *

                                */ shadow?: string; + /** + *

                                Contains Device Defender data.

                                + *

                                For more information about Device Defender, see Device Defender.

                                + */ + deviceDefender?: string; + /** *

                                Indicates whether the thing is connected to the Amazon Web Services IoT Core service.

                                */ @@ -2878,7 +2957,7 @@ export interface UpdateFleetMetricRequest { /** *

                                Used to support unit transformation such as milliseconds to seconds. The unit must be - * supported by CW metric.

                                + * supported by CW metric.

                                */ unit?: FleetMetricUnit | string; diff --git a/clients/client-iot/src/protocols/Aws_restJson1.ts b/clients/client-iot/src/protocols/Aws_restJson1.ts index c6d6da73a0ed..f517c681362e 100644 --- a/clients/client-iot/src/protocols/Aws_restJson1.ts +++ b/clients/client-iot/src/protocols/Aws_restJson1.ts @@ -32565,8 +32565,12 @@ const serializeAws_restJson1ThingIndexingConfiguration = ( return { ...(input.customFields !== undefined && input.customFields !== null && { customFields: serializeAws_restJson1Fields(input.customFields, context) }), + ...(input.deviceDefenderIndexingMode !== undefined && + input.deviceDefenderIndexingMode !== null && { deviceDefenderIndexingMode: input.deviceDefenderIndexingMode }), ...(input.managedFields !== undefined && input.managedFields !== null && { managedFields: serializeAws_restJson1Fields(input.managedFields, context) }), + ...(input.namedShadowIndexingMode !== undefined && + input.namedShadowIndexingMode !== null && { namedShadowIndexingMode: input.namedShadowIndexingMode }), ...(input.thingConnectivityIndexingMode !== undefined && input.thingConnectivityIndexingMode !== null && { thingConnectivityIndexingMode: input.thingConnectivityIndexingMode, @@ -36011,6 +36015,7 @@ const deserializeAws_restJson1ThingDocument = (output: any, context: __SerdeCont output.connectivity !== undefined && output.connectivity !== null ? deserializeAws_restJson1ThingConnectivity(output.connectivity, context) : undefined, + deviceDefender: __expectString(output.deviceDefender), shadow: __expectString(output.shadow), thingGroupNames: output.thingGroupNames !== undefined && output.thingGroupNames !== null @@ -36143,10 +36148,12 @@ const deserializeAws_restJson1ThingIndexingConfiguration = ( output.customFields !== undefined && output.customFields !== null ? deserializeAws_restJson1Fields(output.customFields, context) : undefined, + deviceDefenderIndexingMode: __expectString(output.deviceDefenderIndexingMode), managedFields: output.managedFields !== undefined && output.managedFields !== null ? deserializeAws_restJson1Fields(output.managedFields, context) : undefined, + namedShadowIndexingMode: __expectString(output.namedShadowIndexingMode), thingConnectivityIndexingMode: __expectString(output.thingConnectivityIndexingMode), thingIndexingMode: __expectString(output.thingIndexingMode), } as any; diff --git a/clients/client-iotsitewise/src/models/models_0.ts b/clients/client-iotsitewise/src/models/models_0.ts index 72809d2077d5..f77bf3d41d34 100644 --- a/clients/client-iotsitewise/src/models/models_0.ts +++ b/clients/client-iotsitewise/src/models/models_0.ts @@ -2612,7 +2612,7 @@ export interface CreatePortalRequest { *
                              • *

                                * IAM – The portal uses Identity and Access Management to authenticate users and manage - * user permissions. This option is only available in the China Regions.

                                + * user permissions.

                                *
                              • *
                              *

                              You can't change this value after you create a portal.

                              @@ -4118,6 +4118,38 @@ export namespace MultiLayerStorage { }); } +/** + *

                              How many days your data is kept in the hot tier. By default, your data is kept indefinitely in the hot tier.

                              + */ +export interface RetentionPeriod { + /** + *

                              The number of days that your data is kept.

                              + * + *

                              If you specified a value for this parameter, the unlimited parameter must + * be false.

                              + *
                              + */ + numberOfDays?: number; + + /** + *

                              If true, your data is kept indefinitely.

                              + * + *

                              If configured to true, you must not specify a value for the + * numberOfDays parameter.

                              + *
                              + */ + unlimited?: boolean; +} + +export namespace RetentionPeriod { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RetentionPeriod): any => ({ + ...obj, + }); +} + export enum StorageType { MULTI_LAYER_STORAGE = "MULTI_LAYER_STORAGE", SITEWISE_DEFAULT_STORAGE = "SITEWISE_DEFAULT_STORAGE", @@ -4125,15 +4157,18 @@ export enum StorageType { export interface DescribeStorageConfigurationResponse { /** - *

                              The type of storage that you specified for your data. The storage type can be one of the following values:

                              + *

                              The storage tier that you specified for your data. + * The storageType parameter can be one of the following values:

                              *
                                *
                              • *

                                - * SITEWISE_DEFAULT_STORAGE – IoT SiteWise replicates your data into a service managed database.

                                + * SITEWISE_DEFAULT_STORAGE – IoT SiteWise saves your data into the hot tier. + * The hot tier is a service-managed database.

                                *
                              • *
                              • *

                                - * MULTI_LAYER_STORAGE – IoT SiteWise replicates your data into a service managed database and saves a copy of your raw data and metadata in an Amazon S3 object that you specified.

+ * MULTI_LAYER_STORAGE – IoT SiteWise saves your data in both the hot tier and the cold tier. + * The cold tier is a customer-managed Amazon S3 bucket.

                                *
                              • *
                              */ @@ -4165,6 +4200,11 @@ export interface DescribeStorageConfigurationResponse { */ disassociatedDataStorage?: DisassociatedDataStorageState | string; + /** + *

                              How many days your data is kept in the hot tier. By default, your data is kept indefinitely in the hot tier.

                              + */ + retentionPeriod?: RetentionPeriod; + /** *

                              Contains current status information for the configuration.

                              */ @@ -5761,15 +5801,18 @@ export namespace PutLoggingOptionsResponse { export interface PutStorageConfigurationRequest { /** - *

                              The type of storage that you specified for your data. The storage type can be one of the following values:

                              + *

                              The storage tier that you specified for your data. + * The storageType parameter can be one of the following values:

                              *
                                *
                              • *

                                - * SITEWISE_DEFAULT_STORAGE – IoT SiteWise replicates your data into a service managed database.

                                + * SITEWISE_DEFAULT_STORAGE – IoT SiteWise saves your data into the hot tier. + * The hot tier is a service-managed database.

                                *
                              • *
                              • *

                                - * MULTI_LAYER_STORAGE – IoT SiteWise replicates your data into a service managed database and saves a copy of your raw data and metadata in an Amazon S3 object that you specified.

+ * MULTI_LAYER_STORAGE – IoT SiteWise saves your data in both the hot tier and the cold tier. + * The cold tier is a customer-managed Amazon S3 bucket.

                                *
                              • *
                              */ @@ -5801,6 +5844,11 @@ export interface PutStorageConfigurationRequest { * in the IoT SiteWise User Guide.

                              */ disassociatedDataStorage?: DisassociatedDataStorageState | string; + + /** + *

                              How many days your data is kept in the hot tier. By default, your data is kept indefinitely in the hot tier.

                              + */ + retentionPeriod?: RetentionPeriod; } export namespace PutStorageConfigurationRequest { @@ -5814,15 +5862,18 @@ export namespace PutStorageConfigurationRequest { export interface PutStorageConfigurationResponse { /** - *

                              The type of storage that you specified for your data. The storage type can be one of the following values:

                              + *

                              The storage tier that you specified for your data. + * The storageType parameter can be one of the following values:

                              *
                                *
                              • *

                                - * SITEWISE_DEFAULT_STORAGE – IoT SiteWise replicates your data into a service managed database.

                                + * SITEWISE_DEFAULT_STORAGE – IoT SiteWise saves your data into the hot tier. + * The hot tier is a service-managed database.

                                *
                              • *
                              • *

                                - * MULTI_LAYER_STORAGE – IoT SiteWise replicates your data into a service managed database and saves a copy of your raw data and metadata in an Amazon S3 object that you specified.

+ * MULTI_LAYER_STORAGE – IoT SiteWise saves your data in both the hot tier and the cold tier. + * The cold tier is a customer-managed Amazon S3 bucket.

                                *
                              • *
                              */ @@ -5854,6 +5905,11 @@ export interface PutStorageConfigurationResponse { */ disassociatedDataStorage?: DisassociatedDataStorageState | string; + /** + *

                              How many days your data is kept in the hot tier. By default, your data is kept indefinitely in the hot tier.

                              + */ + retentionPeriod?: RetentionPeriod; + /** *

                              Contains current status information for the configuration.

                              */ diff --git a/clients/client-iotsitewise/src/protocols/Aws_restJson1.ts b/clients/client-iotsitewise/src/protocols/Aws_restJson1.ts index 35d58a50b688..7882f7a11a3f 100644 --- a/clients/client-iotsitewise/src/protocols/Aws_restJson1.ts +++ b/clients/client-iotsitewise/src/protocols/Aws_restJson1.ts @@ -228,6 +228,7 @@ import { Resource, ResourceAlreadyExistsException, ResourceNotFoundException, + RetentionPeriod, ServiceUnavailableException, ThrottlingException, TimeInNanos, @@ -2243,6 +2244,10 @@ export const serializeAws_restJson1PutStorageConfigurationCommand = async ( input.multiLayerStorage !== null && { multiLayerStorage: serializeAws_restJson1MultiLayerStorage(input.multiLayerStorage, context), }), + ...(input.retentionPeriod !== undefined && + input.retentionPeriod !== null && { + retentionPeriod: serializeAws_restJson1RetentionPeriod(input.retentionPeriod, context), + }), ...(input.storageType !== undefined && input.storageType !== null && { storageType: input.storageType }), }); let { hostname: resolvedHostname } = await context.endpoint(); @@ -5692,6 +5697,7 @@ export const deserializeAws_restJson1DescribeStorageConfigurationCommand = async disassociatedDataStorage: undefined, lastUpdateDate: undefined, multiLayerStorage: undefined, + retentionPeriod: undefined, storageType: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); @@ -5707,6 +5713,9 @@ export const deserializeAws_restJson1DescribeStorageConfigurationCommand = async if (data.multiLayerStorage !== undefined && data.multiLayerStorage !== null) { contents.multiLayerStorage = deserializeAws_restJson1MultiLayerStorage(data.multiLayerStorage, context); } + if (data.retentionPeriod !== undefined && data.retentionPeriod !== null) { + contents.retentionPeriod = deserializeAws_restJson1RetentionPeriod(data.retentionPeriod, context); + } if (data.storageType !== undefined && data.storageType !== null) { contents.storageType = __expectString(data.storageType); } @@ -7586,6 +7595,7 @@ export const deserializeAws_restJson1PutStorageConfigurationCommand = async ( configurationStatus: undefined, disassociatedDataStorage: undefined, multiLayerStorage: undefined, + retentionPeriod: undefined, storageType: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); @@ -7598,6 +7608,9 @@ export const deserializeAws_restJson1PutStorageConfigurationCommand = async ( if (data.multiLayerStorage !== undefined && data.multiLayerStorage !== null) { contents.multiLayerStorage = deserializeAws_restJson1MultiLayerStorage(data.multiLayerStorage, context); } + if (data.retentionPeriod !== undefined && data.retentionPeriod !== null) { + contents.retentionPeriod = deserializeAws_restJson1RetentionPeriod(data.retentionPeriod, context); + } if (data.storageType !== undefined && data.storageType !== null) { contents.storageType = __expectString(data.storageType); } @@ -9294,6 +9307,13 @@ const serializeAws_restJson1Resource = (input: Resource, context: __SerdeContext }; }; +const serializeAws_restJson1RetentionPeriod = (input: RetentionPeriod, context: __SerdeContext): any => { + return { + ...(input.numberOfDays !== undefined && input.numberOfDays !== null && { numberOfDays: input.numberOfDays }), + ...(input.unlimited !== undefined && input.unlimited !== null && { unlimited: input.unlimited }), + }; +}; + const serializeAws_restJson1TagMap = (input: { [key: string]: string 
}, context: __SerdeContext): any => { return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { if (value === null) { @@ -10392,6 +10412,13 @@ const deserializeAws_restJson1Resource = (output: any, context: __SerdeContext): } as any; }; +const deserializeAws_restJson1RetentionPeriod = (output: any, context: __SerdeContext): RetentionPeriod => { + return { + numberOfDays: __expectInt32(output.numberOfDays), + unlimited: __expectBoolean(output.unlimited), + } as any; +}; + const deserializeAws_restJson1TagMap = (output: any, context: __SerdeContext): { [key: string]: string } => { return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { if (value === null) { diff --git a/clients/client-iottwinmaker/.gitignore b/clients/client-iottwinmaker/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-iottwinmaker/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-iottwinmaker/LICENSE b/clients/client-iottwinmaker/LICENSE new file mode 100644 index 000000000000..f9e0c8672bca --- /dev/null +++ b/clients/client-iottwinmaker/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/clients/client-iottwinmaker/README.md b/clients/client-iottwinmaker/README.md new file mode 100644 index 000000000000..7531d68a8b09 --- /dev/null +++ b/clients/client-iottwinmaker/README.md @@ -0,0 +1,212 @@ +# @aws-sdk/client-iottwinmaker + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-iottwinmaker/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-iottwinmaker) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-iottwinmaker.svg)](https://www.npmjs.com/package/@aws-sdk/client-iottwinmaker) + +## Description + +AWS SDK for JavaScript IoTTwinMaker Client for Node.js, Browser and React Native. + + +

                              +TwinMaker is in public preview and is subject to change. +

                              +
                              +

                              IoT TwinMaker is a service that enables you to build operational digital twins of +physical systems. IoT TwinMaker overlays measurements and analysis from real-world sensors, +cameras, and enterprise applications so you can create data visualizations to monitor your +physical factory, building, or industrial plant. You can use this real-world data to +monitor operations and diagnose and repair errors.

                              + +## Installing + +To install the this package, simply type add or install @aws-sdk/client-iottwinmaker +using your favorite package manager: + +- `npm install @aws-sdk/client-iottwinmaker` +- `yarn add @aws-sdk/client-iottwinmaker` +- `pnpm add @aws-sdk/client-iottwinmaker` + +## Getting Started + +### Import + +The AWS SDK is modulized by clients and commands. +To send a request, you only need to import the `IoTTwinMakerClient` and +the commands you need, for example `BatchPutPropertyValuesCommand`: + +```js +// ES5 example +const { IoTTwinMakerClient, BatchPutPropertyValuesCommand } = require("@aws-sdk/client-iottwinmaker"); +``` + +```ts +// ES6+ example +import { IoTTwinMakerClient, BatchPutPropertyValuesCommand } from "@aws-sdk/client-iottwinmaker"; +``` + +### Usage + +To send a request, you: + +- Initiate client with configuration (e.g. credentials, region). +- Initiate command with input parameters. +- Call `send` operation on client with command object as input. +- If you are using a custom http handler, you may call `destroy()` to close open connections. + +```js +// a client can be shared by different commands. +const client = new IoTTwinMakerClient({ region: "REGION" }); + +const params = { + /** input parameters */ +}; +const command = new BatchPutPropertyValuesCommand(params); +``` + +#### Async/await + +We recommend using [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await) +operator to wait for the promise returned by send operation as follows: + +```js +// async/await. +try { + const data = await client.send(command); + // process data. +} catch (error) { + // error handling. +} finally { + // finally. +} +``` + +Async-await is clean, concise, intuitive, easy to debug and has better error handling +as compared to using Promise chains or callbacks. + +#### Promises + +You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining) +to execute send operation. + +```js +client.send(command).then( + (data) => { + // process data. + }, + (error) => { + // error handling. + } +); +``` + +Promises can also be called using `.catch()` and `.finally()` as follows: + +```js +client + .send(command) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }) + .finally(() => { + // finally. + }); +``` + +#### Callbacks + +We do not recommend using callbacks because of [callback hell](http://callbackhell.com/), +but they are supported by the send operation. + +```js +// callbacks. +client.send(command, (err, data) => { + // proccess err and data. +}); +``` + +#### v2 compatible style + +The client can also send requests using v2 compatible style. +However, it results in a bigger bundle size and may be dropped in next major version. More details in the blog post +on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/) + +```ts +import * as AWS from "@aws-sdk/client-iottwinmaker"; +const client = new AWS.IoTTwinMaker({ region: "REGION" }); + +// async/await. +try { + const data = await client.batchPutPropertyValues(params); + // process data. +} catch (error) { + // error handling. +} + +// Promises. +client + .batchPutPropertyValues(params) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }); + +// callbacks. +client.batchPutPropertyValues(params, (err, data) => { + // proccess err and data. 
+}); +``` + +### Troubleshooting + +When the service returns an exception, the error will include the exception information, +as well as response metadata (e.g. request id). + +```js +try { + const data = await client.send(command); + // process data. +} catch (error) { + const { requestId, cfId, extendedRequestId } = error.$metadata; + console.log({ requestId, cfId, extendedRequestId }); + /** + * The keys within exceptions are also parsed. + * You can access them by specifying exception names: + * if (error.name === 'SomeServiceException') { + * const value = error.specialKeyInException; + * } + */ +} +``` + +## Getting Help + +Please use these community resources for getting help. +We use the GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them. + +- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html) + or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html). +- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/) + on AWS Developer Blog. +- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`. +- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3). +- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose). + +To test your universal JavaScript code in Node.js, browser and react-native environments, +visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests). + +## Contributing + +This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-iottwinmaker` package is updated. +To contribute to client you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE for more information. 
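As a quick illustration of the workflow the README above describes (create a client, send a command, inspect `error.$metadata`, call `destroy()` when finished), here is a minimal sketch for the new IoT TwinMaker client. `GetWorkspaceCommand` is added in this patch, but its input shape is not shown here, so the `workspaceId` parameter and its value are assumptions for illustration only.

```ts
import { IoTTwinMakerClient, GetWorkspaceCommand } from "@aws-sdk/client-iottwinmaker";

// A client instance can be shared across commands.
const client = new IoTTwinMakerClient({ region: "us-east-1" });

async function describeWorkspace(): Promise<void> {
  // workspaceId is assumed for illustration; consult the generated
  // GetWorkspaceCommandInput type for the authoritative parameter names.
  const command = new GetWorkspaceCommand({ workspaceId: "example-workspace" });
  try {
    const data = await client.send(command);
    console.log(data); // process the workspace description.
  } catch (error: any) {
    // Service exceptions carry response metadata, as noted in the Troubleshooting section above.
    const { requestId, cfId, extendedRequestId } = error.$metadata ?? {};
    console.error({ requestId, cfId, extendedRequestId }, error);
  } finally {
    // Close any open connections when a custom HTTP handler is in use.
    client.destroy();
  }
}

describeWorkspace();
```

The same pattern applies to the other commands added in this patch; only the command class and its input parameters change.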
diff --git a/clients/client-iottwinmaker/jest.config.js b/clients/client-iottwinmaker/jest.config.js new file mode 100644 index 000000000000..02eed352c6a8 --- /dev/null +++ b/clients/client-iottwinmaker/jest.config.js @@ -0,0 +1,4 @@ +module.exports = { + preset: "ts-jest", + testMatch: ["**/*.spec.ts", "!**/*.browser.spec.ts", "!**/*.integ.spec.ts"], +}; diff --git a/clients/client-iottwinmaker/package.json b/clients/client-iottwinmaker/package.json new file mode 100644 index 000000000000..f21565461de0 --- /dev/null +++ b/clients/client-iottwinmaker/package.json @@ -0,0 +1,94 @@ +{ + "name": "@aws-sdk/client-iottwinmaker", + "description": "AWS SDK for JavaScript Iottwinmaker Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "yarn build:cjs && yarn build:es && yarn build:types", + "build:cjs": "tsc -p tsconfig.json", + "build:docs": "yarn clean:docs && typedoc ./", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "clean": "yarn clean:dist && yarn clean:docs", + "clean:dist": "rimraf ./dist-*", + "clean:docs": "rimraf ./docs", + "downlevel-dts": "downlevel-dts dist-types dist-types/ts3.4", + "test": "jest --coverage --passWithNoTests" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "3.43.0", + "@aws-sdk/config-resolver": "3.40.0", + "@aws-sdk/credential-provider-node": "3.41.0", + "@aws-sdk/fetch-http-handler": "3.40.0", + "@aws-sdk/hash-node": "3.40.0", + "@aws-sdk/invalid-dependency": "3.40.0", + "@aws-sdk/middleware-content-length": "3.40.0", + "@aws-sdk/middleware-host-header": "3.40.0", + "@aws-sdk/middleware-logger": "3.40.0", + "@aws-sdk/middleware-retry": "3.40.0", + "@aws-sdk/middleware-serde": "3.40.0", + "@aws-sdk/middleware-signing": "3.40.0", + "@aws-sdk/middleware-stack": "3.40.0", + "@aws-sdk/middleware-user-agent": "3.40.0", + "@aws-sdk/node-config-provider": "3.40.0", + "@aws-sdk/node-http-handler": "3.40.0", + "@aws-sdk/protocol-http": "3.40.0", + "@aws-sdk/smithy-client": "3.41.0", + "@aws-sdk/types": "3.40.0", + "@aws-sdk/url-parser": "3.40.0", + "@aws-sdk/util-base64-browser": "3.37.0", + "@aws-sdk/util-base64-node": "3.37.0", + "@aws-sdk/util-body-length-browser": "3.37.0", + "@aws-sdk/util-body-length-node": "3.37.0", + "@aws-sdk/util-user-agent-browser": "3.40.0", + "@aws-sdk/util-user-agent-node": "3.40.0", + "@aws-sdk/util-utf8-browser": "3.37.0", + "@aws-sdk/util-utf8-node": "3.37.0", + "tslib": "^2.3.0" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "3.38.0", + "@types/node": "^12.7.5", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-iottwinmaker", + "repository": { + "type": "git", + "url": 
"https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-iottwinmaker" + } +} diff --git a/clients/client-iottwinmaker/src/IoTTwinMaker.ts b/clients/client-iottwinmaker/src/IoTTwinMaker.ts new file mode 100644 index 000000000000..0b657b2399f6 --- /dev/null +++ b/clients/client-iottwinmaker/src/IoTTwinMaker.ts @@ -0,0 +1,902 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { + BatchPutPropertyValuesCommand, + BatchPutPropertyValuesCommandInput, + BatchPutPropertyValuesCommandOutput, +} from "./commands/BatchPutPropertyValuesCommand"; +import { + CreateComponentTypeCommand, + CreateComponentTypeCommandInput, + CreateComponentTypeCommandOutput, +} from "./commands/CreateComponentTypeCommand"; +import { + CreateEntityCommand, + CreateEntityCommandInput, + CreateEntityCommandOutput, +} from "./commands/CreateEntityCommand"; +import { CreateSceneCommand, CreateSceneCommandInput, CreateSceneCommandOutput } from "./commands/CreateSceneCommand"; +import { + CreateWorkspaceCommand, + CreateWorkspaceCommandInput, + CreateWorkspaceCommandOutput, +} from "./commands/CreateWorkspaceCommand"; +import { + DeleteComponentTypeCommand, + DeleteComponentTypeCommandInput, + DeleteComponentTypeCommandOutput, +} from "./commands/DeleteComponentTypeCommand"; +import { + DeleteEntityCommand, + DeleteEntityCommandInput, + DeleteEntityCommandOutput, +} from "./commands/DeleteEntityCommand"; +import { DeleteSceneCommand, DeleteSceneCommandInput, DeleteSceneCommandOutput } from "./commands/DeleteSceneCommand"; +import { + DeleteWorkspaceCommand, + DeleteWorkspaceCommandInput, + DeleteWorkspaceCommandOutput, +} from "./commands/DeleteWorkspaceCommand"; +import { + GetComponentTypeCommand, + GetComponentTypeCommandInput, + GetComponentTypeCommandOutput, +} from "./commands/GetComponentTypeCommand"; +import { GetEntityCommand, GetEntityCommandInput, GetEntityCommandOutput } from "./commands/GetEntityCommand"; +import { + GetPropertyValueCommand, + GetPropertyValueCommandInput, + GetPropertyValueCommandOutput, +} from "./commands/GetPropertyValueCommand"; +import { + GetPropertyValueHistoryCommand, + GetPropertyValueHistoryCommandInput, + GetPropertyValueHistoryCommandOutput, +} from "./commands/GetPropertyValueHistoryCommand"; +import { GetSceneCommand, GetSceneCommandInput, GetSceneCommandOutput } from "./commands/GetSceneCommand"; +import { + GetWorkspaceCommand, + GetWorkspaceCommandInput, + GetWorkspaceCommandOutput, +} from "./commands/GetWorkspaceCommand"; +import { + ListComponentTypesCommand, + ListComponentTypesCommandInput, + ListComponentTypesCommandOutput, +} from "./commands/ListComponentTypesCommand"; +import { + ListEntitiesCommand, + ListEntitiesCommandInput, + ListEntitiesCommandOutput, +} from "./commands/ListEntitiesCommand"; +import { ListScenesCommand, ListScenesCommandInput, ListScenesCommandOutput } from "./commands/ListScenesCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + ListWorkspacesCommand, + ListWorkspacesCommandInput, + ListWorkspacesCommandOutput, +} from "./commands/ListWorkspacesCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { + UpdateComponentTypeCommand, + 
UpdateComponentTypeCommandInput, + UpdateComponentTypeCommandOutput, +} from "./commands/UpdateComponentTypeCommand"; +import { + UpdateEntityCommand, + UpdateEntityCommandInput, + UpdateEntityCommandOutput, +} from "./commands/UpdateEntityCommand"; +import { UpdateSceneCommand, UpdateSceneCommandInput, UpdateSceneCommandOutput } from "./commands/UpdateSceneCommand"; +import { + UpdateWorkspaceCommand, + UpdateWorkspaceCommandInput, + UpdateWorkspaceCommandOutput, +} from "./commands/UpdateWorkspaceCommand"; +import { IoTTwinMakerClient } from "./IoTTwinMakerClient"; + +/** + * + *

                              + * TwinMaker is in public preview and is subject to change. + *

                              + *
                              + *

                              IoT TwinMaker is a service that enables you to build operational digital twins of + * physical systems. IoT TwinMaker overlays measurements and analysis from real-world sensors, + * cameras, and enterprise applications so you can create data visualizations to monitor your + * physical factory, building, or industrial plant. You can use this real-world data to + * monitor operations and diagnose and repair errors.

                              + */ +export class IoTTwinMaker extends IoTTwinMakerClient { + /** + *

                              Sets values for multiple time series properties.

                              + */ + public batchPutPropertyValues( + args: BatchPutPropertyValuesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public batchPutPropertyValues( + args: BatchPutPropertyValuesCommandInput, + cb: (err: any, data?: BatchPutPropertyValuesCommandOutput) => void + ): void; + public batchPutPropertyValues( + args: BatchPutPropertyValuesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: BatchPutPropertyValuesCommandOutput) => void + ): void; + public batchPutPropertyValues( + args: BatchPutPropertyValuesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: BatchPutPropertyValuesCommandOutput) => void), + cb?: (err: any, data?: BatchPutPropertyValuesCommandOutput) => void + ): Promise | void { + const command = new BatchPutPropertyValuesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Creates a component type.

                              + * + *

                              + * TwinMaker is in public preview and is subject to change. + *

                              + *
                              + */ + public createComponentType( + args: CreateComponentTypeCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createComponentType( + args: CreateComponentTypeCommandInput, + cb: (err: any, data?: CreateComponentTypeCommandOutput) => void + ): void; + public createComponentType( + args: CreateComponentTypeCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateComponentTypeCommandOutput) => void + ): void; + public createComponentType( + args: CreateComponentTypeCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateComponentTypeCommandOutput) => void), + cb?: (err: any, data?: CreateComponentTypeCommandOutput) => void + ): Promise | void { + const command = new CreateComponentTypeCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Creates an entity.

                              + */ + public createEntity( + args: CreateEntityCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createEntity(args: CreateEntityCommandInput, cb: (err: any, data?: CreateEntityCommandOutput) => void): void; + public createEntity( + args: CreateEntityCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateEntityCommandOutput) => void + ): void; + public createEntity( + args: CreateEntityCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateEntityCommandOutput) => void), + cb?: (err: any, data?: CreateEntityCommandOutput) => void + ): Promise | void { + const command = new CreateEntityCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Creates a scene.

                              + */ + public createScene(args: CreateSceneCommandInput, options?: __HttpHandlerOptions): Promise; + public createScene(args: CreateSceneCommandInput, cb: (err: any, data?: CreateSceneCommandOutput) => void): void; + public createScene( + args: CreateSceneCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateSceneCommandOutput) => void + ): void; + public createScene( + args: CreateSceneCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateSceneCommandOutput) => void), + cb?: (err: any, data?: CreateSceneCommandOutput) => void + ): Promise | void { + const command = new CreateSceneCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a workspace.

                              + */ + public createWorkspace( + args: CreateWorkspaceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createWorkspace( + args: CreateWorkspaceCommandInput, + cb: (err: any, data?: CreateWorkspaceCommandOutput) => void + ): void; + public createWorkspace( + args: CreateWorkspaceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateWorkspaceCommandOutput) => void + ): void; + public createWorkspace( + args: CreateWorkspaceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateWorkspaceCommandOutput) => void), + cb?: (err: any, data?: CreateWorkspaceCommandOutput) => void + ): Promise | void { + const command = new CreateWorkspaceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Deletes a component type.

                              + */ + public deleteComponentType( + args: DeleteComponentTypeCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteComponentType( + args: DeleteComponentTypeCommandInput, + cb: (err: any, data?: DeleteComponentTypeCommandOutput) => void + ): void; + public deleteComponentType( + args: DeleteComponentTypeCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteComponentTypeCommandOutput) => void + ): void; + public deleteComponentType( + args: DeleteComponentTypeCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteComponentTypeCommandOutput) => void), + cb?: (err: any, data?: DeleteComponentTypeCommandOutput) => void + ): Promise | void { + const command = new DeleteComponentTypeCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Deletes an entity.

                              + */ + public deleteEntity( + args: DeleteEntityCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteEntity(args: DeleteEntityCommandInput, cb: (err: any, data?: DeleteEntityCommandOutput) => void): void; + public deleteEntity( + args: DeleteEntityCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteEntityCommandOutput) => void + ): void; + public deleteEntity( + args: DeleteEntityCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteEntityCommandOutput) => void), + cb?: (err: any, data?: DeleteEntityCommandOutput) => void + ): Promise | void { + const command = new DeleteEntityCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Deletes a scene.

                              + */ + public deleteScene(args: DeleteSceneCommandInput, options?: __HttpHandlerOptions): Promise; + public deleteScene(args: DeleteSceneCommandInput, cb: (err: any, data?: DeleteSceneCommandOutput) => void): void; + public deleteScene( + args: DeleteSceneCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteSceneCommandOutput) => void + ): void; + public deleteScene( + args: DeleteSceneCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteSceneCommandOutput) => void), + cb?: (err: any, data?: DeleteSceneCommandOutput) => void + ): Promise | void { + const command = new DeleteSceneCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Deletes a workspace.

                              + */ + public deleteWorkspace( + args: DeleteWorkspaceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteWorkspace( + args: DeleteWorkspaceCommandInput, + cb: (err: any, data?: DeleteWorkspaceCommandOutput) => void + ): void; + public deleteWorkspace( + args: DeleteWorkspaceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteWorkspaceCommandOutput) => void + ): void; + public deleteWorkspace( + args: DeleteWorkspaceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteWorkspaceCommandOutput) => void), + cb?: (err: any, data?: DeleteWorkspaceCommandOutput) => void + ): Promise | void { + const command = new DeleteWorkspaceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Retrieves information about a component type.

                              + */ + public getComponentType( + args: GetComponentTypeCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getComponentType( + args: GetComponentTypeCommandInput, + cb: (err: any, data?: GetComponentTypeCommandOutput) => void + ): void; + public getComponentType( + args: GetComponentTypeCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetComponentTypeCommandOutput) => void + ): void; + public getComponentType( + args: GetComponentTypeCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetComponentTypeCommandOutput) => void), + cb?: (err: any, data?: GetComponentTypeCommandOutput) => void + ): Promise | void { + const command = new GetComponentTypeCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Retrieves information about an entity.

                              + */ + public getEntity(args: GetEntityCommandInput, options?: __HttpHandlerOptions): Promise; + public getEntity(args: GetEntityCommandInput, cb: (err: any, data?: GetEntityCommandOutput) => void): void; + public getEntity( + args: GetEntityCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetEntityCommandOutput) => void + ): void; + public getEntity( + args: GetEntityCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetEntityCommandOutput) => void), + cb?: (err: any, data?: GetEntityCommandOutput) => void + ): Promise | void { + const command = new GetEntityCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Gets the property values for a component, component type, entity, or workspace.

                              + *

                              You must specify a value for either componentName, componentTypeId, entityId, or workspaceId.

                              + */ + public getPropertyValue( + args: GetPropertyValueCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getPropertyValue( + args: GetPropertyValueCommandInput, + cb: (err: any, data?: GetPropertyValueCommandOutput) => void + ): void; + public getPropertyValue( + args: GetPropertyValueCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetPropertyValueCommandOutput) => void + ): void; + public getPropertyValue( + args: GetPropertyValueCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetPropertyValueCommandOutput) => void), + cb?: (err: any, data?: GetPropertyValueCommandOutput) => void + ): Promise | void { + const command = new GetPropertyValueCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Retrieves information about the history of a time series property value for a component, component type, entity, or workspace.

                              + *

You must specify a value for workspaceId. For entity-specific queries, specify values for componentName and + * entityId. For cross-entity queries, specify a value for componentTypeId.
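A sketch of an entity-specific history query under those rules; the time-range and property-selection field names (startDateTime, endDateTime, selectedProperties) are assumptions, so defer to models_0.ts in this patch for the authoritative request shape:

```ts
import { IoTTwinMakerClient, GetPropertyValueHistoryCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region

// Entity-specific query: componentName + entityId. For a cross-entity query,
// drop those two fields and pass componentTypeId instead.
const history = await client.send(
  new GetPropertyValueHistoryCommand({
    workspaceId: "my-workspace",
    entityId: "my-entity",
    componentName: "my-component",
    selectedProperties: ["temperature"], // assumed field name
    startDateTime: new Date(Date.now() - 60 * 60 * 1000), // assumed field name; last hour
    endDateTime: new Date(), // assumed field name
  })
);
console.log(history.propertyValues);
```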

                              + */ + public getPropertyValueHistory( + args: GetPropertyValueHistoryCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getPropertyValueHistory( + args: GetPropertyValueHistoryCommandInput, + cb: (err: any, data?: GetPropertyValueHistoryCommandOutput) => void + ): void; + public getPropertyValueHistory( + args: GetPropertyValueHistoryCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetPropertyValueHistoryCommandOutput) => void + ): void; + public getPropertyValueHistory( + args: GetPropertyValueHistoryCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetPropertyValueHistoryCommandOutput) => void), + cb?: (err: any, data?: GetPropertyValueHistoryCommandOutput) => void + ): Promise | void { + const command = new GetPropertyValueHistoryCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Retrieves information about a scene.

                              + */ + public getScene(args: GetSceneCommandInput, options?: __HttpHandlerOptions): Promise; + public getScene(args: GetSceneCommandInput, cb: (err: any, data?: GetSceneCommandOutput) => void): void; + public getScene( + args: GetSceneCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetSceneCommandOutput) => void + ): void; + public getScene( + args: GetSceneCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetSceneCommandOutput) => void), + cb?: (err: any, data?: GetSceneCommandOutput) => void + ): Promise | void { + const command = new GetSceneCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Retrieves information about a workspace.

                              + */ + public getWorkspace( + args: GetWorkspaceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getWorkspace(args: GetWorkspaceCommandInput, cb: (err: any, data?: GetWorkspaceCommandOutput) => void): void; + public getWorkspace( + args: GetWorkspaceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetWorkspaceCommandOutput) => void + ): void; + public getWorkspace( + args: GetWorkspaceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetWorkspaceCommandOutput) => void), + cb?: (err: any, data?: GetWorkspaceCommandOutput) => void + ): Promise | void { + const command = new GetWorkspaceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Lists all component types in a workspace.

                              + */ + public listComponentTypes( + args: ListComponentTypesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listComponentTypes( + args: ListComponentTypesCommandInput, + cb: (err: any, data?: ListComponentTypesCommandOutput) => void + ): void; + public listComponentTypes( + args: ListComponentTypesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListComponentTypesCommandOutput) => void + ): void; + public listComponentTypes( + args: ListComponentTypesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListComponentTypesCommandOutput) => void), + cb?: (err: any, data?: ListComponentTypesCommandOutput) => void + ): Promise | void { + const command = new ListComponentTypesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Lists all entities in a workspace.
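The list operations are token-paginated; below is a sketch of draining every page manually, where maxResults, nextToken, and the entitySummaries response field are assumed names for illustration:

```ts
import { IoTTwinMakerClient, ListEntitiesCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region

// Page through every entity in a workspace until no nextToken is returned.
let nextToken: string | undefined;
do {
  const page = await client.send(
    new ListEntitiesCommand({ workspaceId: "my-workspace", maxResults: 50, nextToken })
  );
  for (const summary of page.entitySummaries ?? []) {
    console.log(summary.entityId);
  }
  nextToken = page.nextToken;
} while (nextToken);
```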

                              + */ + public listEntities( + args: ListEntitiesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listEntities(args: ListEntitiesCommandInput, cb: (err: any, data?: ListEntitiesCommandOutput) => void): void; + public listEntities( + args: ListEntitiesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListEntitiesCommandOutput) => void + ): void; + public listEntities( + args: ListEntitiesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListEntitiesCommandOutput) => void), + cb?: (err: any, data?: ListEntitiesCommandOutput) => void + ): Promise | void { + const command = new ListEntitiesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Lists all scenes in a workspace.

                              + */ + public listScenes(args: ListScenesCommandInput, options?: __HttpHandlerOptions): Promise; + public listScenes(args: ListScenesCommandInput, cb: (err: any, data?: ListScenesCommandOutput) => void): void; + public listScenes( + args: ListScenesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListScenesCommandOutput) => void + ): void; + public listScenes( + args: ListScenesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListScenesCommandOutput) => void), + cb?: (err: any, data?: ListScenesCommandOutput) => void + ): Promise | void { + const command = new ListScenesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Lists all tags associated with a resource.

                              + */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTagsForResourceCommandOutput) => void), + cb?: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): Promise | void { + const command = new ListTagsForResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Retrieves information about workspaces in the current account.

                              + */ + public listWorkspaces( + args: ListWorkspacesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listWorkspaces( + args: ListWorkspacesCommandInput, + cb: (err: any, data?: ListWorkspacesCommandOutput) => void + ): void; + public listWorkspaces( + args: ListWorkspacesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListWorkspacesCommandOutput) => void + ): void; + public listWorkspaces( + args: ListWorkspacesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListWorkspacesCommandOutput) => void), + cb?: (err: any, data?: ListWorkspacesCommandOutput) => void + ): Promise | void { + const command = new ListWorkspacesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Adds tags to a resource.
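A short tagging round-trip as a sketch; the ARN is a placeholder and the resourceARN/tags/tagKeys field names are assumptions based on the command shapes imported in the client file:

```ts
import { IoTTwinMakerClient, TagResourceCommand, UntagResourceCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region
const resourceARN = "arn:aws:iottwinmaker:us-east-1:123456789012:workspace/my-workspace"; // placeholder

// Add a tag, then remove it again. Field names are assumed for illustration.
await client.send(new TagResourceCommand({ resourceARN, tags: { env: "dev" } }));
await client.send(new UntagResourceCommand({ resourceARN, tagKeys: ["env"] }));
```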

                              + */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise; + public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void; + public tagResource( + args: TagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TagResourceCommandOutput) => void + ): void; + public tagResource( + args: TagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TagResourceCommandOutput) => void), + cb?: (err: any, data?: TagResourceCommandOutput) => void + ): Promise | void { + const command = new TagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Removes tags from a resource.

                              + */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public untagResource( + args: UntagResourceCommandInput, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UntagResourceCommandOutput) => void), + cb?: (err: any, data?: UntagResourceCommandOutput) => void + ): Promise | void { + const command = new UntagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Updates information in a component type.

                              + */ + public updateComponentType( + args: UpdateComponentTypeCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateComponentType( + args: UpdateComponentTypeCommandInput, + cb: (err: any, data?: UpdateComponentTypeCommandOutput) => void + ): void; + public updateComponentType( + args: UpdateComponentTypeCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateComponentTypeCommandOutput) => void + ): void; + public updateComponentType( + args: UpdateComponentTypeCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateComponentTypeCommandOutput) => void), + cb?: (err: any, data?: UpdateComponentTypeCommandOutput) => void + ): Promise | void { + const command = new UpdateComponentTypeCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Updates an entity.

                              + */ + public updateEntity( + args: UpdateEntityCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateEntity(args: UpdateEntityCommandInput, cb: (err: any, data?: UpdateEntityCommandOutput) => void): void; + public updateEntity( + args: UpdateEntityCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateEntityCommandOutput) => void + ): void; + public updateEntity( + args: UpdateEntityCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateEntityCommandOutput) => void), + cb?: (err: any, data?: UpdateEntityCommandOutput) => void + ): Promise | void { + const command = new UpdateEntityCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Updates a scene.

                              + */ + public updateScene(args: UpdateSceneCommandInput, options?: __HttpHandlerOptions): Promise; + public updateScene(args: UpdateSceneCommandInput, cb: (err: any, data?: UpdateSceneCommandOutput) => void): void; + public updateScene( + args: UpdateSceneCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateSceneCommandOutput) => void + ): void; + public updateScene( + args: UpdateSceneCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateSceneCommandOutput) => void), + cb?: (err: any, data?: UpdateSceneCommandOutput) => void + ): Promise | void { + const command = new UpdateSceneCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                              Updates a workspace.

                              + */ + public updateWorkspace( + args: UpdateWorkspaceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateWorkspace( + args: UpdateWorkspaceCommandInput, + cb: (err: any, data?: UpdateWorkspaceCommandOutput) => void + ): void; + public updateWorkspace( + args: UpdateWorkspaceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateWorkspaceCommandOutput) => void + ): void; + public updateWorkspace( + args: UpdateWorkspaceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateWorkspaceCommandOutput) => void), + cb?: (err: any, data?: UpdateWorkspaceCommandOutput) => void + ): Promise | void { + const command = new UpdateWorkspaceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } +} diff --git a/clients/client-iottwinmaker/src/IoTTwinMakerClient.ts b/clients/client-iottwinmaker/src/IoTTwinMakerClient.ts new file mode 100644 index 000000000000..91b60ffb2f0f --- /dev/null +++ b/clients/client-iottwinmaker/src/IoTTwinMakerClient.ts @@ -0,0 +1,351 @@ +import { + EndpointsInputConfig, + EndpointsResolvedConfig, + RegionInputConfig, + RegionResolvedConfig, + resolveEndpointsConfig, + resolveRegionConfig, +} from "@aws-sdk/config-resolver"; +import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length"; +import { + getHostHeaderPlugin, + HostHeaderInputConfig, + HostHeaderResolvedConfig, + resolveHostHeaderConfig, +} from "@aws-sdk/middleware-host-header"; +import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; +import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry"; +import { + AwsAuthInputConfig, + AwsAuthResolvedConfig, + getAwsAuthPlugin, + resolveAwsAuthConfig, +} from "@aws-sdk/middleware-signing"; +import { + getUserAgentPlugin, + resolveUserAgentConfig, + UserAgentInputConfig, + UserAgentResolvedConfig, +} from "@aws-sdk/middleware-user-agent"; +import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http"; +import { + Client as __Client, + SmithyConfiguration as __SmithyConfiguration, + SmithyResolvedConfiguration as __SmithyResolvedConfiguration, +} from "@aws-sdk/smithy-client"; +import { + Credentials as __Credentials, + Decoder as __Decoder, + Encoder as __Encoder, + Hash as __Hash, + HashConstructor as __HashConstructor, + HttpHandlerOptions as __HttpHandlerOptions, + Logger as __Logger, + Provider as __Provider, + Provider, + RegionInfoProvider, + StreamCollector as __StreamCollector, + UrlParser as __UrlParser, + UserAgent as __UserAgent, +} from "@aws-sdk/types"; + +import { + BatchPutPropertyValuesCommandInput, + BatchPutPropertyValuesCommandOutput, +} from "./commands/BatchPutPropertyValuesCommand"; +import { + CreateComponentTypeCommandInput, + CreateComponentTypeCommandOutput, +} from "./commands/CreateComponentTypeCommand"; +import { CreateEntityCommandInput, CreateEntityCommandOutput } from "./commands/CreateEntityCommand"; +import { CreateSceneCommandInput, CreateSceneCommandOutput } from "./commands/CreateSceneCommand"; +import { CreateWorkspaceCommandInput, CreateWorkspaceCommandOutput } from "./commands/CreateWorkspaceCommand"; +import { + DeleteComponentTypeCommandInput, + 
DeleteComponentTypeCommandOutput, +} from "./commands/DeleteComponentTypeCommand"; +import { DeleteEntityCommandInput, DeleteEntityCommandOutput } from "./commands/DeleteEntityCommand"; +import { DeleteSceneCommandInput, DeleteSceneCommandOutput } from "./commands/DeleteSceneCommand"; +import { DeleteWorkspaceCommandInput, DeleteWorkspaceCommandOutput } from "./commands/DeleteWorkspaceCommand"; +import { GetComponentTypeCommandInput, GetComponentTypeCommandOutput } from "./commands/GetComponentTypeCommand"; +import { GetEntityCommandInput, GetEntityCommandOutput } from "./commands/GetEntityCommand"; +import { GetPropertyValueCommandInput, GetPropertyValueCommandOutput } from "./commands/GetPropertyValueCommand"; +import { + GetPropertyValueHistoryCommandInput, + GetPropertyValueHistoryCommandOutput, +} from "./commands/GetPropertyValueHistoryCommand"; +import { GetSceneCommandInput, GetSceneCommandOutput } from "./commands/GetSceneCommand"; +import { GetWorkspaceCommandInput, GetWorkspaceCommandOutput } from "./commands/GetWorkspaceCommand"; +import { ListComponentTypesCommandInput, ListComponentTypesCommandOutput } from "./commands/ListComponentTypesCommand"; +import { ListEntitiesCommandInput, ListEntitiesCommandOutput } from "./commands/ListEntitiesCommand"; +import { ListScenesCommandInput, ListScenesCommandOutput } from "./commands/ListScenesCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { ListWorkspacesCommandInput, ListWorkspacesCommandOutput } from "./commands/ListWorkspacesCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { + UpdateComponentTypeCommandInput, + UpdateComponentTypeCommandOutput, +} from "./commands/UpdateComponentTypeCommand"; +import { UpdateEntityCommandInput, UpdateEntityCommandOutput } from "./commands/UpdateEntityCommand"; +import { UpdateSceneCommandInput, UpdateSceneCommandOutput } from "./commands/UpdateSceneCommand"; +import { UpdateWorkspaceCommandInput, UpdateWorkspaceCommandOutput } from "./commands/UpdateWorkspaceCommand"; +import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; + +export type ServiceInputTypes = + | BatchPutPropertyValuesCommandInput + | CreateComponentTypeCommandInput + | CreateEntityCommandInput + | CreateSceneCommandInput + | CreateWorkspaceCommandInput + | DeleteComponentTypeCommandInput + | DeleteEntityCommandInput + | DeleteSceneCommandInput + | DeleteWorkspaceCommandInput + | GetComponentTypeCommandInput + | GetEntityCommandInput + | GetPropertyValueCommandInput + | GetPropertyValueHistoryCommandInput + | GetSceneCommandInput + | GetWorkspaceCommandInput + | ListComponentTypesCommandInput + | ListEntitiesCommandInput + | ListScenesCommandInput + | ListTagsForResourceCommandInput + | ListWorkspacesCommandInput + | TagResourceCommandInput + | UntagResourceCommandInput + | UpdateComponentTypeCommandInput + | UpdateEntityCommandInput + | UpdateSceneCommandInput + | UpdateWorkspaceCommandInput; + +export type ServiceOutputTypes = + | BatchPutPropertyValuesCommandOutput + | CreateComponentTypeCommandOutput + | CreateEntityCommandOutput + | CreateSceneCommandOutput + | CreateWorkspaceCommandOutput + | DeleteComponentTypeCommandOutput + | DeleteEntityCommandOutput + | DeleteSceneCommandOutput + | DeleteWorkspaceCommandOutput + | 
GetComponentTypeCommandOutput + | GetEntityCommandOutput + | GetPropertyValueCommandOutput + | GetPropertyValueHistoryCommandOutput + | GetSceneCommandOutput + | GetWorkspaceCommandOutput + | ListComponentTypesCommandOutput + | ListEntitiesCommandOutput + | ListScenesCommandOutput + | ListTagsForResourceCommandOutput + | ListWorkspacesCommandOutput + | TagResourceCommandOutput + | UntagResourceCommandOutput + | UpdateComponentTypeCommandOutput + | UpdateEntityCommandOutput + | UpdateSceneCommandOutput + | UpdateWorkspaceCommandOutput; + +export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { + /** + * The HTTP handler to use. Fetch in browser and Https in Nodejs. + */ + requestHandler?: __HttpHandler; + + /** + * A constructor for a class implementing the {@link __Hash} interface + * that computes the SHA-256 HMAC or checksum of a string or binary buffer. + * @internal + */ + sha256?: __HashConstructor; + + /** + * The function that will be used to convert strings into HTTP endpoints. + * @internal + */ + urlParser?: __UrlParser; + + /** + * A function that can calculate the length of a request body. + * @internal + */ + bodyLengthChecker?: (body: any) => number | undefined; + + /** + * A function that converts a stream into an array of bytes. + * @internal + */ + streamCollector?: __StreamCollector; + + /** + * The function that will be used to convert a base64-encoded string to a byte array. + * @internal + */ + base64Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a base64-encoded string. + * @internal + */ + base64Encoder?: __Encoder; + + /** + * The function that will be used to convert a UTF8-encoded string to a byte array. + * @internal + */ + utf8Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a UTF-8 encoded string. + * @internal + */ + utf8Encoder?: __Encoder; + + /** + * The runtime environment. + * @internal + */ + runtime?: string; + + /** + * Disable dynamically changing the endpoint of the client based on the hostPrefix + * trait of an operation. + */ + disableHostPrefix?: boolean; + + /** + * Value for how many times a request will be made at most in case of retry. + */ + maxAttempts?: number | __Provider; + + /** + * Specifies which retry algorithm to use. + */ + retryMode?: string | __Provider; + + /** + * Optional logger for logging debug/info/warn/error. + */ + logger?: __Logger; + + /** + * Enables IPv6/IPv4 dualstack endpoint. + */ + useDualstackEndpoint?: boolean | __Provider; + + /** + * Enables FIPS compatible endpoints. + */ + useFipsEndpoint?: boolean | __Provider; + + /** + * Unique service identifier. + * @internal + */ + serviceId?: string; + + /** + * The AWS region to which this client will send requests + */ + region?: string | __Provider; + + /** + * Default credentials provider; Not available in browser runtime. + * @internal + */ + credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; + + /** + * Fetch related hostname, signing name or signing region with given region.
+ * @internal + */ + regionInfoProvider?: RegionInfoProvider; + + /** + * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header + * @internal + */ + defaultUserAgentProvider?: Provider<__UserAgent>; +} + +type IoTTwinMakerClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & + ClientDefaults & + RegionInputConfig & + EndpointsInputConfig & + RetryInputConfig & + HostHeaderInputConfig & + AwsAuthInputConfig & + UserAgentInputConfig; +/** + * The configuration interface of IoTTwinMakerClient class constructor that set the region, credentials and other options. + */ +export interface IoTTwinMakerClientConfig extends IoTTwinMakerClientConfigType {} + +type IoTTwinMakerClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & + Required & + RegionResolvedConfig & + EndpointsResolvedConfig & + RetryResolvedConfig & + HostHeaderResolvedConfig & + AwsAuthResolvedConfig & + UserAgentResolvedConfig; +/** + * The resolved configuration interface of IoTTwinMakerClient class. This is resolved and normalized from the {@link IoTTwinMakerClientConfig | constructor configuration interface}. + */ +export interface IoTTwinMakerClientResolvedConfig extends IoTTwinMakerClientResolvedConfigType {} + +/** + * + *

                              + * TwinMaker is in public preview and is subject to change. + *

                              + *
                              + *

                              IoT TwinMaker is a service that enables you to build operational digital twins of + * physical systems. IoT TwinMaker overlays measurements and analysis from real-world sensors, + * cameras, and enterprise applications so you can create data visualizations to monitor your + * physical factory, building, or industrial plant. You can use this real-world data to + * monitor operations and diagnose and repair errors.
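The constructor below resolves the configuration layers (region, endpoints, retry, host header, signing, user agent) in order and registers the standard middleware plugins, so a caller only supplies the options it cares about from ClientDefaults. A sketch of building the bare-bones client, issuing one call, and shutting it down; the region, retry settings, and the workspaceSummaries response field are assumptions for illustration:

```ts
import { IoTTwinMakerClient, ListWorkspacesCommand } from "@aws-sdk/client-iottwinmaker";

// Any option omitted here falls back to the defaults resolved by getRuntimeConfig.
const client = new IoTTwinMakerClient({
  region: "us-east-1", // placeholder region
  maxAttempts: 3,
  useFipsEndpoint: false,
});

try {
  const { workspaceSummaries } = await client.send(new ListWorkspacesCommand({}));
  console.log(`${workspaceSummaries?.length ?? 0} workspaces`);
} finally {
  // Frees underlying resources such as sockets; mainly relevant in Node.js.
  client.destroy();
}
```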

                              + */ +export class IoTTwinMakerClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + IoTTwinMakerClientResolvedConfig +> { + /** + * The resolved configuration of IoTTwinMakerClient class. This is resolved and normalized from the {@link IoTTwinMakerClientConfig | constructor configuration interface}. + */ + readonly config: IoTTwinMakerClientResolvedConfig; + + constructor(configuration: IoTTwinMakerClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. + */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-iottwinmaker/src/commands/BatchPutPropertyValuesCommand.ts b/clients/client-iottwinmaker/src/commands/BatchPutPropertyValuesCommand.ts new file mode 100644 index 000000000000..a999f7f6a51d --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/BatchPutPropertyValuesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { BatchPutPropertyValuesRequest, BatchPutPropertyValuesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1BatchPutPropertyValuesCommand, + serializeAws_restJson1BatchPutPropertyValuesCommand, +} from "../protocols/Aws_restJson1"; + +export interface BatchPutPropertyValuesCommandInput extends BatchPutPropertyValuesRequest {} +export interface BatchPutPropertyValuesCommandOutput extends BatchPutPropertyValuesResponse, __MetadataBearer {} + +/** + *

                              Sets values for multiple time series properties.
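A hedged sketch of the write path this command exposes; the nested entry shape (entityPropertyReference, propertyValues, timestamp, doubleValue) and the errorEntries response field are assumptions about the request and response models, so defer to models_0.ts in this patch:

```ts
import { IoTTwinMakerClient, BatchPutPropertyValuesCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region

// Write a single value for one time series property. All nested field names below
// are assumed for illustration; see models_0.ts for the authoritative shapes.
const result = await client.send(
  new BatchPutPropertyValuesCommand({
    workspaceId: "my-workspace",
    entries: [
      {
        entityPropertyReference: {
          entityId: "my-entity",
          componentName: "my-component",
          propertyName: "temperature",
        },
        propertyValues: [{ value: { doubleValue: 21.5 }, timestamp: new Date() }],
      },
    ],
  })
);
console.log(result.errorEntries);
```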

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, BatchPutPropertyValuesCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, BatchPutPropertyValuesCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new BatchPutPropertyValuesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link BatchPutPropertyValuesCommandInput} for command's `input` shape. + * @see {@link BatchPutPropertyValuesCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class BatchPutPropertyValuesCommand extends $Command< + BatchPutPropertyValuesCommandInput, + BatchPutPropertyValuesCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: BatchPutPropertyValuesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "BatchPutPropertyValuesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: BatchPutPropertyValuesRequest.filterSensitiveLog, + outputFilterSensitiveLog: BatchPutPropertyValuesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: BatchPutPropertyValuesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1BatchPutPropertyValuesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1BatchPutPropertyValuesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/CreateComponentTypeCommand.ts b/clients/client-iottwinmaker/src/commands/CreateComponentTypeCommand.ts new file mode 100644 index 000000000000..cb33f7d69352 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/CreateComponentTypeCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; 
+import { CreateComponentTypeRequest, CreateComponentTypeResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateComponentTypeCommand, + serializeAws_restJson1CreateComponentTypeCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateComponentTypeCommandInput extends CreateComponentTypeRequest {} +export interface CreateComponentTypeCommandOutput extends CreateComponentTypeResponse, __MetadataBearer {} + +/** + *

                              Creates a component type.

                              + * + *

                              + * TwinMaker is in public preview and is subject to change. + *

                              + *
                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, CreateComponentTypeCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, CreateComponentTypeCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new CreateComponentTypeCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateComponentTypeCommandInput} for command's `input` shape. + * @see {@link CreateComponentTypeCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class CreateComponentTypeCommand extends $Command< + CreateComponentTypeCommandInput, + CreateComponentTypeCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateComponentTypeCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "CreateComponentTypeCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateComponentTypeRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateComponentTypeResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateComponentTypeCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateComponentTypeCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateComponentTypeCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/CreateEntityCommand.ts b/clients/client-iottwinmaker/src/commands/CreateEntityCommand.ts new file mode 100644 index 000000000000..dce301682e3f --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/CreateEntityCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { CreateEntityRequest, CreateEntityResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1CreateEntityCommand, + serializeAws_restJson1CreateEntityCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateEntityCommandInput extends CreateEntityRequest {} +export interface CreateEntityCommandOutput extends CreateEntityResponse, __MetadataBearer {} + +/** + *

                              Creates an entity.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, CreateEntityCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, CreateEntityCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new CreateEntityCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateEntityCommandInput} for command's `input` shape. + * @see {@link CreateEntityCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class CreateEntityCommand extends $Command< + CreateEntityCommandInput, + CreateEntityCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateEntityCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "CreateEntityCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateEntityRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateEntityResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateEntityCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateEntityCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateEntityCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/CreateSceneCommand.ts b/clients/client-iottwinmaker/src/commands/CreateSceneCommand.ts new file mode 100644 index 000000000000..c9a12bff8d63 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/CreateSceneCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { CreateSceneRequest, CreateSceneResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateSceneCommand, + 
serializeAws_restJson1CreateSceneCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateSceneCommandInput extends CreateSceneRequest {} +export interface CreateSceneCommandOutput extends CreateSceneResponse, __MetadataBearer {} + +/** + *

                              Creates a scene.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, CreateSceneCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, CreateSceneCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new CreateSceneCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateSceneCommandInput} for command's `input` shape. + * @see {@link CreateSceneCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class CreateSceneCommand extends $Command< + CreateSceneCommandInput, + CreateSceneCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateSceneCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "CreateSceneCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateSceneRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateSceneResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateSceneCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateSceneCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateSceneCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/CreateWorkspaceCommand.ts b/clients/client-iottwinmaker/src/commands/CreateWorkspaceCommand.ts new file mode 100644 index 000000000000..93324a58001f --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/CreateWorkspaceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { CreateWorkspaceRequest, CreateWorkspaceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateWorkspaceCommand, + 
serializeAws_restJson1CreateWorkspaceCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateWorkspaceCommandInput extends CreateWorkspaceRequest {} +export interface CreateWorkspaceCommandOutput extends CreateWorkspaceResponse, __MetadataBearer {} + +/** + *

Creates a workspace.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, CreateWorkspaceCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, CreateWorkspaceCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new CreateWorkspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateWorkspaceCommandInput} for command's `input` shape. + * @see {@link CreateWorkspaceCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class CreateWorkspaceCommand extends $Command< + CreateWorkspaceCommandInput, + CreateWorkspaceCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateWorkspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "CreateWorkspaceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateWorkspaceRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateWorkspaceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateWorkspaceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateWorkspaceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateWorkspaceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/DeleteComponentTypeCommand.ts b/clients/client-iottwinmaker/src/commands/DeleteComponentTypeCommand.ts new file mode 100644 index 000000000000..7890b1c23de9 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/DeleteComponentTypeCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { DeleteComponentTypeRequest, DeleteComponentTypeResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1DeleteComponentTypeCommand, + serializeAws_restJson1DeleteComponentTypeCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteComponentTypeCommandInput extends DeleteComponentTypeRequest {} +export interface DeleteComponentTypeCommandOutput extends DeleteComponentTypeResponse, __MetadataBearer {} + +/** + *

                              Deletes a component type.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, DeleteComponentTypeCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, DeleteComponentTypeCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new DeleteComponentTypeCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteComponentTypeCommandInput} for command's `input` shape. + * @see {@link DeleteComponentTypeCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class DeleteComponentTypeCommand extends $Command< + DeleteComponentTypeCommandInput, + DeleteComponentTypeCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteComponentTypeCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "DeleteComponentTypeCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteComponentTypeRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteComponentTypeResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteComponentTypeCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteComponentTypeCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteComponentTypeCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/DeleteEntityCommand.ts b/clients/client-iottwinmaker/src/commands/DeleteEntityCommand.ts new file mode 100644 index 000000000000..c59756e48750 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/DeleteEntityCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { DeleteEntityRequest, DeleteEntityResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1DeleteEntityCommand, + serializeAws_restJson1DeleteEntityCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteEntityCommandInput extends DeleteEntityRequest {} +export interface DeleteEntityCommandOutput extends DeleteEntityResponse, __MetadataBearer {} + +/** + *

                              Deletes an entity.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, DeleteEntityCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, DeleteEntityCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new DeleteEntityCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteEntityCommandInput} for command's `input` shape. + * @see {@link DeleteEntityCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class DeleteEntityCommand extends $Command< + DeleteEntityCommandInput, + DeleteEntityCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteEntityCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "DeleteEntityCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteEntityRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteEntityResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteEntityCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteEntityCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteEntityCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/DeleteSceneCommand.ts b/clients/client-iottwinmaker/src/commands/DeleteSceneCommand.ts new file mode 100644 index 000000000000..0541f1ef82fb --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/DeleteSceneCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { DeleteSceneRequest, DeleteSceneResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteSceneCommand, + 
serializeAws_restJson1DeleteSceneCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteSceneCommandInput extends DeleteSceneRequest {} +export interface DeleteSceneCommandOutput extends DeleteSceneResponse, __MetadataBearer {} + +/** + *

<p>Deletes a scene.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, DeleteSceneCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, DeleteSceneCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new DeleteSceneCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteSceneCommandInput} for command's `input` shape. + * @see {@link DeleteSceneCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class DeleteSceneCommand extends $Command< + DeleteSceneCommandInput, + DeleteSceneCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteSceneCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "DeleteSceneCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteSceneRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteSceneResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteSceneCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteSceneCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteSceneCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/DeleteWorkspaceCommand.ts b/clients/client-iottwinmaker/src/commands/DeleteWorkspaceCommand.ts new file mode 100644 index 000000000000..080c25340246 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/DeleteWorkspaceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { DeleteWorkspaceRequest, DeleteWorkspaceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteWorkspaceCommand, + 
serializeAws_restJson1DeleteWorkspaceCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteWorkspaceCommandInput extends DeleteWorkspaceRequest {} +export interface DeleteWorkspaceCommandOutput extends DeleteWorkspaceResponse, __MetadataBearer {} + +/** + *

<p>Deletes a workspace.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, DeleteWorkspaceCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, DeleteWorkspaceCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new DeleteWorkspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteWorkspaceCommandInput} for command's `input` shape. + * @see {@link DeleteWorkspaceCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class DeleteWorkspaceCommand extends $Command< + DeleteWorkspaceCommandInput, + DeleteWorkspaceCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteWorkspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "DeleteWorkspaceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteWorkspaceRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteWorkspaceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteWorkspaceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteWorkspaceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteWorkspaceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/GetComponentTypeCommand.ts b/clients/client-iottwinmaker/src/commands/GetComponentTypeCommand.ts new file mode 100644 index 000000000000..992e7f4d5dbf --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/GetComponentTypeCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { GetComponentTypeRequest, GetComponentTypeResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1GetComponentTypeCommand, + serializeAws_restJson1GetComponentTypeCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetComponentTypeCommandInput extends GetComponentTypeRequest {} +export interface GetComponentTypeCommandOutput extends GetComponentTypeResponse, __MetadataBearer {} + +/** + *

<p>Retrieves information about a component type.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, GetComponentTypeCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, GetComponentTypeCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new GetComponentTypeCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetComponentTypeCommandInput} for command's `input` shape. + * @see {@link GetComponentTypeCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class GetComponentTypeCommand extends $Command< + GetComponentTypeCommandInput, + GetComponentTypeCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetComponentTypeCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "GetComponentTypeCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetComponentTypeRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetComponentTypeResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetComponentTypeCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetComponentTypeCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetComponentTypeCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/GetEntityCommand.ts b/clients/client-iottwinmaker/src/commands/GetEntityCommand.ts new file mode 100644 index 000000000000..f40b81c45b72 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/GetEntityCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { GetEntityRequest, GetEntityResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1GetEntityCommand, + serializeAws_restJson1GetEntityCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetEntityCommandInput extends GetEntityRequest {} +export interface GetEntityCommandOutput extends GetEntityResponse, __MetadataBearer {} + +/** + *

<p>Retrieves information about an entity.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, GetEntityCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, GetEntityCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new GetEntityCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetEntityCommandInput} for command's `input` shape. + * @see {@link GetEntityCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class GetEntityCommand extends $Command< + GetEntityCommandInput, + GetEntityCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetEntityCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "GetEntityCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetEntityRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetEntityResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetEntityCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetEntityCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetEntityCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/GetPropertyValueCommand.ts b/clients/client-iottwinmaker/src/commands/GetPropertyValueCommand.ts new file mode 100644 index 000000000000..d7b34a9810ab --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/GetPropertyValueCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { GetPropertyValueRequest, GetPropertyValueResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetPropertyValueCommand, + serializeAws_restJson1GetPropertyValueCommand, +} from 
"../protocols/Aws_restJson1"; + +export interface GetPropertyValueCommandInput extends GetPropertyValueRequest {} +export interface GetPropertyValueCommandOutput extends GetPropertyValueResponse, __MetadataBearer {} + +/** + *

<p>Gets the property values for a component, component type, entity, or workspace.</p>

                              + *

<p>You must specify a value for either componentName, componentTypeId, entityId, or workspaceId.</p>
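A minimal sketch of such a call, assuming the request also takes a `selectedProperties` list (that field name, the IDs, and the region are illustrative assumptions):

```typescript
import { GetPropertyValueCommand, IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" }); // region chosen for illustration

// Entity-scoped query: identify the component via workspaceId + entityId + componentName,
// as the description above suggests. selectedProperties and all literal values are assumptions.
const response = await client.send(
  new GetPropertyValueCommand({
    workspaceId: "MyWorkspace",
    entityId: "my-entity-id",
    componentName: "MyComponent",
    selectedProperties: ["temperature"],
  })
);
console.log(response);
```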

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, GetPropertyValueCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, GetPropertyValueCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new GetPropertyValueCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetPropertyValueCommandInput} for command's `input` shape. + * @see {@link GetPropertyValueCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class GetPropertyValueCommand extends $Command< + GetPropertyValueCommandInput, + GetPropertyValueCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetPropertyValueCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "GetPropertyValueCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetPropertyValueRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetPropertyValueResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetPropertyValueCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetPropertyValueCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetPropertyValueCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/GetPropertyValueHistoryCommand.ts b/clients/client-iottwinmaker/src/commands/GetPropertyValueHistoryCommand.ts new file mode 100644 index 000000000000..818a327c5c3f --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/GetPropertyValueHistoryCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { GetPropertyValueHistoryRequest, GetPropertyValueHistoryResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1GetPropertyValueHistoryCommand, + serializeAws_restJson1GetPropertyValueHistoryCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetPropertyValueHistoryCommandInput extends GetPropertyValueHistoryRequest {} +export interface GetPropertyValueHistoryCommandOutput extends GetPropertyValueHistoryResponse, __MetadataBearer {} + +/** + *

<p>Retrieves information about the history of a time series property value for a component, component type, entity, or workspace.</p>

                              + *

<p>You must specify a value for workspaceId. For entity-specific queries, specify values for componentName and entityId. For cross-entity queries, specify a value for componentTypeId.</p>
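A sketch of the entity-specific form, assuming the request also takes a `selectedProperties` list and a `startDateTime`/`endDateTime` range (those field names and all literal values are assumptions, not taken from this diff):

```typescript
import { GetPropertyValueHistoryCommand, IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" }); // region chosen for illustration

// Entity-specific history query: workspaceId plus componentName and entityId, per the
// description above. selectedProperties and the time-range fields are assumed names.
const history = await client.send(
  new GetPropertyValueHistoryCommand({
    workspaceId: "MyWorkspace",
    entityId: "my-entity-id",
    componentName: "MyComponent",
    selectedProperties: ["temperature"],
    startDateTime: new Date("2021-11-01T00:00:00Z"),
    endDateTime: new Date("2021-11-30T00:00:00Z"),
  })
);
console.log(history);
```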

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, GetPropertyValueHistoryCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, GetPropertyValueHistoryCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new GetPropertyValueHistoryCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetPropertyValueHistoryCommandInput} for command's `input` shape. + * @see {@link GetPropertyValueHistoryCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class GetPropertyValueHistoryCommand extends $Command< + GetPropertyValueHistoryCommandInput, + GetPropertyValueHistoryCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetPropertyValueHistoryCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "GetPropertyValueHistoryCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetPropertyValueHistoryRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetPropertyValueHistoryResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetPropertyValueHistoryCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetPropertyValueHistoryCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetPropertyValueHistoryCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/GetSceneCommand.ts b/clients/client-iottwinmaker/src/commands/GetSceneCommand.ts new file mode 100644 index 000000000000..5f461511a421 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/GetSceneCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { 
GetSceneRequest, GetSceneResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetSceneCommand, + serializeAws_restJson1GetSceneCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetSceneCommandInput extends GetSceneRequest {} +export interface GetSceneCommandOutput extends GetSceneResponse, __MetadataBearer {} + +/** + *

<p>Retrieves information about a scene.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, GetSceneCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, GetSceneCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new GetSceneCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetSceneCommandInput} for command's `input` shape. + * @see {@link GetSceneCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class GetSceneCommand extends $Command< + GetSceneCommandInput, + GetSceneCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetSceneCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "GetSceneCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetSceneRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetSceneResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetSceneCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetSceneCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetSceneCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/GetWorkspaceCommand.ts b/clients/client-iottwinmaker/src/commands/GetWorkspaceCommand.ts new file mode 100644 index 000000000000..1768d0d42003 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/GetWorkspaceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { GetWorkspaceRequest, GetWorkspaceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetWorkspaceCommand, + serializeAws_restJson1GetWorkspaceCommand, +} from "../protocols/Aws_restJson1"; + +export 
interface GetWorkspaceCommandInput extends GetWorkspaceRequest {} +export interface GetWorkspaceCommandOutput extends GetWorkspaceResponse, __MetadataBearer {} + +/** + *

<p>Retrieves information about a workspace.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, GetWorkspaceCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, GetWorkspaceCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new GetWorkspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetWorkspaceCommandInput} for command's `input` shape. + * @see {@link GetWorkspaceCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class GetWorkspaceCommand extends $Command< + GetWorkspaceCommandInput, + GetWorkspaceCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetWorkspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "GetWorkspaceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetWorkspaceRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetWorkspaceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetWorkspaceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetWorkspaceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetWorkspaceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/ListComponentTypesCommand.ts b/clients/client-iottwinmaker/src/commands/ListComponentTypesCommand.ts new file mode 100644 index 000000000000..7bde51e3434a --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/ListComponentTypesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { ListComponentTypesRequest, ListComponentTypesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListComponentTypesCommand, 
+ serializeAws_restJson1ListComponentTypesCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListComponentTypesCommandInput extends ListComponentTypesRequest {} +export interface ListComponentTypesCommandOutput extends ListComponentTypesResponse, __MetadataBearer {} + +/** + *

<p>Lists all component types in a workspace.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, ListComponentTypesCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, ListComponentTypesCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new ListComponentTypesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListComponentTypesCommandInput} for command's `input` shape. + * @see {@link ListComponentTypesCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class ListComponentTypesCommand extends $Command< + ListComponentTypesCommandInput, + ListComponentTypesCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListComponentTypesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "ListComponentTypesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListComponentTypesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListComponentTypesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListComponentTypesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListComponentTypesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListComponentTypesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/ListEntitiesCommand.ts b/clients/client-iottwinmaker/src/commands/ListEntitiesCommand.ts new file mode 100644 index 000000000000..d0b4614b683c --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/ListEntitiesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { ListEntitiesRequest, ListEntitiesResponse } from "../models/models_0"; 
+import { + deserializeAws_restJson1ListEntitiesCommand, + serializeAws_restJson1ListEntitiesCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListEntitiesCommandInput extends ListEntitiesRequest {} +export interface ListEntitiesCommandOutput extends ListEntitiesResponse, __MetadataBearer {} + +/** + *

<p>Lists all entities in a workspace.</p>
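List operations such as this one typically page their results; the sketch below walks every page, assuming the conventional `maxResults`/`nextToken` request fields and an `entitySummaries` list in the response (all assumed names, shown for illustration only):

```typescript
import { IoTTwinMakerClient, ListEntitiesCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" }); // region chosen for illustration

// Walk every entity in a workspace, one page at a time. maxResults, nextToken, and
// entitySummaries are assumed field names following common AWS paging conventions.
let nextToken: string | undefined;
do {
  const page = await client.send(
    new ListEntitiesCommand({ workspaceId: "MyWorkspace", maxResults: 50, nextToken })
  );
  for (const summary of page.entitySummaries ?? []) {
    console.log(summary);
  }
  nextToken = page.nextToken;
} while (nextToken);
```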

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, ListEntitiesCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, ListEntitiesCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new ListEntitiesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListEntitiesCommandInput} for command's `input` shape. + * @see {@link ListEntitiesCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class ListEntitiesCommand extends $Command< + ListEntitiesCommandInput, + ListEntitiesCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListEntitiesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "ListEntitiesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListEntitiesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListEntitiesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListEntitiesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListEntitiesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListEntitiesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/ListScenesCommand.ts b/clients/client-iottwinmaker/src/commands/ListScenesCommand.ts new file mode 100644 index 000000000000..c40c3710160a --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/ListScenesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { ListScenesRequest, ListScenesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListScenesCommand, + serializeAws_restJson1ListScenesCommand, +} 
from "../protocols/Aws_restJson1"; + +export interface ListScenesCommandInput extends ListScenesRequest {} +export interface ListScenesCommandOutput extends ListScenesResponse, __MetadataBearer {} + +/** + *

<p>Lists all scenes in a workspace.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, ListScenesCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, ListScenesCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new ListScenesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListScenesCommandInput} for command's `input` shape. + * @see {@link ListScenesCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class ListScenesCommand extends $Command< + ListScenesCommandInput, + ListScenesCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListScenesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "ListScenesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListScenesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListScenesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListScenesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListScenesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListScenesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/ListTagsForResourceCommand.ts b/clients/client-iottwinmaker/src/commands/ListTagsForResourceCommand.ts new file mode 100644 index 000000000000..7d5347636256 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/ListTagsForResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { ListTagsForResourceRequest, ListTagsForResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTagsForResourceCommand, + 
serializeAws_restJson1ListTagsForResourceCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListTagsForResourceCommandInput extends ListTagsForResourceRequest {} +export interface ListTagsForResourceCommandOutput extends ListTagsForResourceResponse, __MetadataBearer {} + +/** + *

<p>Lists all tags associated with a resource.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, ListTagsForResourceCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, ListTagsForResourceCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "ListTagsForResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTagsForResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTagsForResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTagsForResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTagsForResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListTagsForResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/ListWorkspacesCommand.ts b/clients/client-iottwinmaker/src/commands/ListWorkspacesCommand.ts new file mode 100644 index 000000000000..dfc3f73edc1c --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/ListWorkspacesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { ListWorkspacesRequest, ListWorkspacesResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1ListWorkspacesCommand, + serializeAws_restJson1ListWorkspacesCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListWorkspacesCommandInput extends ListWorkspacesRequest {} +export interface ListWorkspacesCommandOutput extends ListWorkspacesResponse, __MetadataBearer {} + +/** + *

<p>Retrieves information about workspaces in the current account.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, ListWorkspacesCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, ListWorkspacesCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new ListWorkspacesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListWorkspacesCommandInput} for command's `input` shape. + * @see {@link ListWorkspacesCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class ListWorkspacesCommand extends $Command< + ListWorkspacesCommandInput, + ListWorkspacesCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListWorkspacesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "ListWorkspacesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListWorkspacesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListWorkspacesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListWorkspacesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListWorkspacesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListWorkspacesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/TagResourceCommand.ts b/clients/client-iottwinmaker/src/commands/TagResourceCommand.ts new file mode 100644 index 000000000000..9f30056624c4 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/TagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { TagResourceRequest, TagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1TagResourceCommand, + 
serializeAws_restJson1TagResourceCommand, +} from "../protocols/Aws_restJson1"; + +export interface TagResourceCommandInput extends TagResourceRequest {} +export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} + +/** + *

<p>Adds tags to a resource.</p>
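A sketch of tagging a workspace by ARN, assuming the request takes a `resourceARN` string and a `tags` map (the field names, the ARN, and the tag values are illustrative assumptions):

```typescript
import { IoTTwinMakerClient, TagResourceCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" }); // region chosen for illustration

// Attach tags to an IoT TwinMaker resource identified by ARN. resourceARN and tags
// are assumed field names; the ARN and tag values are placeholders.
await client.send(
  new TagResourceCommand({
    resourceARN: "arn:aws:iottwinmaker:us-east-1:111122223333:workspace/MyWorkspace",
    tags: { Environment: "test", Team: "digital-twin" },
  })
);
```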

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, TagResourceCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, TagResourceCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "TagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: TagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: TagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1TagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1TagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/UntagResourceCommand.ts b/clients/client-iottwinmaker/src/commands/UntagResourceCommand.ts new file mode 100644 index 000000000000..9f4989bfdf92 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/UntagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { UntagResourceRequest, UntagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UntagResourceCommand, + serializeAws_restJson1UntagResourceCommand, 
+} from "../protocols/Aws_restJson1"; + +export interface UntagResourceCommandInput extends UntagResourceRequest {} +export interface UntagResourceCommandOutput extends UntagResourceResponse, __MetadataBearer {} + +/** + *

<p>Removes tags from a resource.</p>

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, UntagResourceCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, UntagResourceCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "UntagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UntagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: UntagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UntagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UntagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UntagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/UpdateComponentTypeCommand.ts b/clients/client-iottwinmaker/src/commands/UpdateComponentTypeCommand.ts new file mode 100644 index 000000000000..0ae606db1647 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/UpdateComponentTypeCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { UpdateComponentTypeRequest, UpdateComponentTypeResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1UpdateComponentTypeCommand, + serializeAws_restJson1UpdateComponentTypeCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateComponentTypeCommandInput extends UpdateComponentTypeRequest {} +export interface UpdateComponentTypeCommandOutput extends UpdateComponentTypeResponse, __MetadataBearer {} + +/** + *

                              Updates information in a component type.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, UpdateComponentTypeCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, UpdateComponentTypeCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new UpdateComponentTypeCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateComponentTypeCommandInput} for command's `input` shape. + * @see {@link UpdateComponentTypeCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class UpdateComponentTypeCommand extends $Command< + UpdateComponentTypeCommandInput, + UpdateComponentTypeCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateComponentTypeCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "UpdateComponentTypeCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateComponentTypeRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateComponentTypeResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateComponentTypeCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateComponentTypeCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateComponentTypeCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/UpdateEntityCommand.ts b/clients/client-iottwinmaker/src/commands/UpdateEntityCommand.ts new file mode 100644 index 000000000000..2f474b81afab --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/UpdateEntityCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { UpdateEntityRequest, UpdateEntityResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1UpdateEntityCommand, + serializeAws_restJson1UpdateEntityCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateEntityCommandInput extends UpdateEntityRequest {} +export interface UpdateEntityCommandOutput extends UpdateEntityResponse, __MetadataBearer {} + +/** + *

                              Updates an entity.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, UpdateEntityCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, UpdateEntityCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new UpdateEntityCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateEntityCommandInput} for command's `input` shape. + * @see {@link UpdateEntityCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class UpdateEntityCommand extends $Command< + UpdateEntityCommandInput, + UpdateEntityCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateEntityCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "UpdateEntityCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateEntityRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateEntityResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateEntityCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateEntityCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateEntityCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/UpdateSceneCommand.ts b/clients/client-iottwinmaker/src/commands/UpdateSceneCommand.ts new file mode 100644 index 000000000000..6b4c318ca40d --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/UpdateSceneCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { UpdateSceneRequest, UpdateSceneResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateSceneCommand, + 
serializeAws_restJson1UpdateSceneCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateSceneCommandInput extends UpdateSceneRequest {} +export interface UpdateSceneCommandOutput extends UpdateSceneResponse, __MetadataBearer {} + +/** + *

                              Updates a scene.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, UpdateSceneCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, UpdateSceneCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new UpdateSceneCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateSceneCommandInput} for command's `input` shape. + * @see {@link UpdateSceneCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class UpdateSceneCommand extends $Command< + UpdateSceneCommandInput, + UpdateSceneCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateSceneCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "UpdateSceneCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateSceneRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateSceneResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateSceneCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateSceneCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateSceneCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/UpdateWorkspaceCommand.ts b/clients/client-iottwinmaker/src/commands/UpdateWorkspaceCommand.ts new file mode 100644 index 000000000000..ec048700df06 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/UpdateWorkspaceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { IoTTwinMakerClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../IoTTwinMakerClient"; +import { UpdateWorkspaceRequest, UpdateWorkspaceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateWorkspaceCommand, + 
serializeAws_restJson1UpdateWorkspaceCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateWorkspaceCommandInput extends UpdateWorkspaceRequest {} +export interface UpdateWorkspaceCommandOutput extends UpdateWorkspaceResponse, __MetadataBearer {} + +/** + *

                              Updates a workspace.

                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { IoTTwinMakerClient, UpdateWorkspaceCommand } from "@aws-sdk/client-iottwinmaker"; // ES Modules import + * // const { IoTTwinMakerClient, UpdateWorkspaceCommand } = require("@aws-sdk/client-iottwinmaker"); // CommonJS import + * const client = new IoTTwinMakerClient(config); + * const command = new UpdateWorkspaceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateWorkspaceCommandInput} for command's `input` shape. + * @see {@link UpdateWorkspaceCommandOutput} for command's `response` shape. + * @see {@link IoTTwinMakerClientResolvedConfig | config} for IoTTwinMakerClient's `config` shape. + * + */ +export class UpdateWorkspaceCommand extends $Command< + UpdateWorkspaceCommandInput, + UpdateWorkspaceCommandOutput, + IoTTwinMakerClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateWorkspaceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: IoTTwinMakerClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "IoTTwinMakerClient"; + const commandName = "UpdateWorkspaceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateWorkspaceRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateWorkspaceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateWorkspaceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateWorkspaceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateWorkspaceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-iottwinmaker/src/commands/index.ts b/clients/client-iottwinmaker/src/commands/index.ts new file mode 100644 index 000000000000..3632098859c5 --- /dev/null +++ b/clients/client-iottwinmaker/src/commands/index.ts @@ -0,0 +1,26 @@ +export * from "./BatchPutPropertyValuesCommand"; +export * from "./CreateComponentTypeCommand"; +export * from "./CreateEntityCommand"; +export * from "./CreateSceneCommand"; +export * from "./CreateWorkspaceCommand"; +export * from "./DeleteComponentTypeCommand"; +export * from "./DeleteEntityCommand"; +export * from "./DeleteSceneCommand"; +export * from "./DeleteWorkspaceCommand"; +export * from "./GetComponentTypeCommand"; +export * from "./GetEntityCommand"; +export * from "./GetPropertyValueCommand"; +export * from "./GetPropertyValueHistoryCommand"; +export * from "./GetSceneCommand"; +export * from "./GetWorkspaceCommand"; +export * from "./ListComponentTypesCommand"; +export * from "./ListEntitiesCommand"; +export * from 
"./ListScenesCommand"; +export * from "./ListTagsForResourceCommand"; +export * from "./ListWorkspacesCommand"; +export * from "./TagResourceCommand"; +export * from "./UntagResourceCommand"; +export * from "./UpdateComponentTypeCommand"; +export * from "./UpdateEntityCommand"; +export * from "./UpdateSceneCommand"; +export * from "./UpdateWorkspaceCommand"; diff --git a/clients/client-iottwinmaker/src/endpoints.ts b/clients/client-iottwinmaker/src/endpoints.ts new file mode 100644 index 000000000000..2db3610708f5 --- /dev/null +++ b/clients/client-iottwinmaker/src/endpoints.ts @@ -0,0 +1,134 @@ +import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; +import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; + +const regionHash: RegionHash = {}; + +const partitionHash: PartitionHash = { + aws: { + regions: [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ], + regionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "iottwinmaker.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "iottwinmaker-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "iottwinmaker-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "iottwinmaker.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, + "aws-cn": { + regions: ["cn-north-1", "cn-northwest-1"], + regionRegex: "^cn\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "iottwinmaker.{region}.amazonaws.com.cn", + tags: [], + }, + { + hostname: "iottwinmaker-fips.{region}.amazonaws.com.cn", + tags: ["fips"], + }, + { + hostname: "iottwinmaker-fips.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack", "fips"], + }, + { + hostname: "iottwinmaker.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "aws-iso": { + regions: ["us-iso-east-1", "us-iso-west-1"], + regionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "iottwinmaker.{region}.c2s.ic.gov", + tags: [], + }, + { + hostname: "iottwinmaker-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, + ], + }, + "aws-iso-b": { + regions: ["us-isob-east-1"], + regionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "iottwinmaker.{region}.sc2s.sgov.gov", + tags: [], + }, + { + hostname: "iottwinmaker-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, + ], + }, + "aws-us-gov": { + regions: ["us-gov-east-1", "us-gov-west-1"], + regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "iottwinmaker.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "iottwinmaker-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "iottwinmaker-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "iottwinmaker.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, +}; + +export const defaultRegionInfoProvider: RegionInfoProvider = async ( + region: string, + options?: RegionInfoProviderOptions +) => + getRegionInfo(region, { + ...options, + signingService: "iottwinmaker", + regionHash, + partitionHash, + }); diff --git a/clients/client-iottwinmaker/src/index.ts b/clients/client-iottwinmaker/src/index.ts new file mode 100644 index 000000000000..3f20e0608d27 --- /dev/null +++ 
b/clients/client-iottwinmaker/src/index.ts @@ -0,0 +1,5 @@ +export * from "./IoTTwinMaker"; +export * from "./IoTTwinMakerClient"; +export * from "./commands"; +export * from "./models"; +export * from "./pagination"; diff --git a/clients/client-iottwinmaker/src/models/index.ts b/clients/client-iottwinmaker/src/models/index.ts new file mode 100644 index 000000000000..09c5d6e09b8c --- /dev/null +++ b/clients/client-iottwinmaker/src/models/index.ts @@ -0,0 +1 @@ +export * from "./models_0"; diff --git a/clients/client-iottwinmaker/src/models/models_0.ts b/clients/client-iottwinmaker/src/models/models_0.ts new file mode 100644 index 000000000000..d0e14c8fdc3f --- /dev/null +++ b/clients/client-iottwinmaker/src/models/models_0.ts @@ -0,0 +1,2856 @@ +import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; + +/** + *

                              Access is denied.

                              + */ +export interface AccessDeniedException extends __SmithyException, $MetadataBearer { + name: "AccessDeniedException"; + $fault: "client"; + message?: string; +} + +export namespace AccessDeniedException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccessDeniedException): any => ({ + ...obj, + }); +} + +/** + *

                              An object that uniquely identifies an entity property.

                              + */ +export interface EntityPropertyReference { + /** + *

                              The name of the component.

                              + */ + componentName?: string; + + /** + *

                              A mapping of external IDs to property names. External IDs uniquely identify properties from external data stores.

                              + */ + externalIdProperty?: { [key: string]: string }; + + /** + *

                              The ID of the entity.

                              + */ + entityId?: string; + + /** + *

                              The name of the property.

                              + */ + propertyName: string | undefined; +} + +export namespace EntityPropertyReference { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EntityPropertyReference): any => ({ + ...obj, + }); +} + +/** + *
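A short sketch of the `EntityPropertyReference` shape defined above; the IDs and names are placeholders.

```ts
import type { EntityPropertyReference } from "@aws-sdk/client-iottwinmaker";

// Reference a property by entity ID and component name...
const byEntity: EntityPropertyReference = {
  entityId: "my-entity-id",           // placeholder
  componentName: "TemperatureSensor", // placeholder
  propertyName: "temperature",        // the only required field
};

// ...or by external IDs understood by the underlying data store.
const byExternalId: EntityPropertyReference = {
  externalIdProperty: { assetId: "external-asset-123" }, // placeholder key/value
  propertyName: "temperature",
};
```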

                              A value that associates a component and an entity.

                              + */ +export interface RelationshipValue { + /** + *

                              The ID of the target entity associated with this relationship value.

                              + */ + targetEntityId?: string; + + /** + *

                              The name of the target component associated with the relationship value.

                              + */ + targetComponentName?: string; +} + +export namespace RelationshipValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RelationshipValue): any => ({ + ...obj, + }); +} + +/** + *

                              An unexpected error has occurred.

                              + */ +export interface InternalServerException extends __SmithyException, $MetadataBearer { + name: "InternalServerException"; + $fault: "server"; + message?: string; +} + +export namespace InternalServerException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InternalServerException): any => ({ + ...obj, + }); +} + +/** + *

                              The resource wasn't found.

                              + */ +export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { + name: "ResourceNotFoundException"; + $fault: "client"; + message?: string; +} + +export namespace ResourceNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ + ...obj, + }); +} + +/** + *

                              The rate exceeds the limit.

                              + */ +export interface ThrottlingException extends __SmithyException, $MetadataBearer { + name: "ThrottlingException"; + $fault: "client"; + message?: string; +} + +export namespace ThrottlingException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ThrottlingException): any => ({ + ...obj, + }); +} + +/** + *

                              The request failed validation.

                              + */ +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; + $fault: "client"; + message?: string; +} + +export namespace ValidationException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); +} + +/** + *

                              A conflict occurred.

                              + */ +export interface ConflictException extends __SmithyException, $MetadataBearer { + name: "ConflictException"; + $fault: "client"; + message?: string; +} + +export namespace ConflictException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConflictException): any => ({ + ...obj, + }); +} + +/** + *
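The modeled exceptions above all expose a `name` discriminant, a `$fault` classification, and an optional `message`, so callers can branch on `name` after a rejected `client.send`. A minimal sketch with placeholder IDs:

```ts
import { GetEntityCommand, IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

try {
  await client.send(
    new GetEntityCommand({ workspaceId: "my-workspace", entityId: "missing-entity" })
  );
} catch (err: any) {
  // The modeled exceptions above carry `name`, `$fault`, and an optional `message`.
  if (err.name === "ResourceNotFoundException") {
    console.warn("Entity does not exist:", err.message);
  } else if (err.name === "ThrottlingException") {
    console.warn("Throttled; retry with backoff.");
  } else {
    throw err;
  }
}
```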

                              The Lambda function.

                              + */ +export interface LambdaFunction { + /** + *

                              The ARN of the Lambda function.

                              + */ + arn: string | undefined; +} + +export namespace LambdaFunction { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LambdaFunction): any => ({ + ...obj, + }); +} + +/** + *

                              The data connector.

                              + */ +export interface DataConnector { + /** + *

                              The Lambda function associated with this data connector.

                              + */ + lambda?: LambdaFunction; + + /** + *

                              A Boolean value that specifies whether the data connector is native to IoT TwinMaker.

                              + */ + isNative?: boolean; +} + +export namespace DataConnector { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DataConnector): any => ({ + ...obj, + }); +} + +export enum Scope { + ENTITY = "ENTITY", + WORKSPACE = "WORKSPACE", +} + +/** + *

                              The function request body.

                              + */ +export interface FunctionRequest { + /** + *

                              The required properties of the function.

                              + */ + requiredProperties?: string[]; + + /** + *

                              The scope of the function.

                              + */ + scope?: Scope | string; + + /** + *

                              The data connector.

                              + */ + implementedBy?: DataConnector; +} + +export namespace FunctionRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FunctionRequest): any => ({ + ...obj, + }); +} + +/** + *
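Putting the connector shapes above together: a `LambdaFunction` ARN feeds a `DataConnector`, which backs a `FunctionRequest`. A sketch with placeholder names; how the function is attached to a component type is not shown in this excerpt.

```ts
import {
  DataConnector,
  FunctionRequest,
  LambdaFunction,
  Scope,
} from "@aws-sdk/client-iottwinmaker";

const lambda: LambdaFunction = {
  arn: "arn:aws:lambda:us-east-1:123456789012:function:readTimeseries", // placeholder
};

const connector: DataConnector = {
  lambda,
  isNative: false, // backed by the Lambda above rather than a native connector
};

const dataReader: FunctionRequest = {
  requiredProperties: ["temperature"], // placeholder property name
  scope: Scope.ENTITY,
  implementedBy: connector,
};
```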

                              An object that specifies a relationship with another component type.

                              + */ +export interface Relationship { + /** + *

                              The ID of the target component type associated with this relationship.

                              + */ + targetComponentTypeId?: string; + + /** + *

                              The type of the relationship.

                              + */ + relationshipType?: string; +} + +export namespace Relationship { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Relationship): any => ({ + ...obj, + }); +} + +export enum Type { + BOOLEAN = "BOOLEAN", + DOUBLE = "DOUBLE", + INTEGER = "INTEGER", + LIST = "LIST", + LONG = "LONG", + MAP = "MAP", + RELATIONSHIP = "RELATIONSHIP", + STRING = "STRING", +} + +export enum State { + ACTIVE = "ACTIVE", + CREATING = "CREATING", + DELETING = "DELETING", + ERROR = "ERROR", + UPDATING = "UPDATING", +} + +export interface CreateComponentTypeResponse { + /** + *

                              The ARN of the component type.

                              + */ + arn: string | undefined; + + /** + *

                              The date and time when the component type was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The current state of the component type.

                              + */ + state: State | string | undefined; +} + +export namespace CreateComponentTypeResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateComponentTypeResponse): any => ({ + ...obj, + }); +} + +/** + *

                              The service quota was exceeded.

                              + */ +export interface ServiceQuotaExceededException extends __SmithyException, $MetadataBearer { + name: "ServiceQuotaExceededException"; + $fault: "client"; + message?: string; +} + +export namespace ServiceQuotaExceededException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServiceQuotaExceededException): any => ({ + ...obj, + }); +} + +export enum PropertyUpdateType { + DELETE = "DELETE", + UPDATE = "UPDATE", +} + +export interface CreateEntityResponse { + /** + *

                              The ID of the entity.

                              + */ + entityId: string | undefined; + + /** + *

                              The ARN of the entity.

                              + */ + arn: string | undefined; + + /** + *

                              The date and time when the entity was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The current state of the entity.

                              + */ + state: State | string | undefined; +} + +export namespace CreateEntityResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEntityResponse): any => ({ + ...obj, + }); +} + +export interface CreateSceneRequest { + /** + *

                              The ID of the workspace that contains the scene.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the scene.

                              + */ + sceneId: string | undefined; + + /** + *

                              The relative path that specifies the location of the content definition file.

                              + */ + contentLocation: string | undefined; + + /** + *

                              The description for this scene.

                              + */ + description?: string; + + /** + *

                              A list of capabilities that the scene uses to render itself.

                              + */ + capabilities?: string[]; + + /** + *

                              Metadata that you can use to manage the scene.

                              + */ + tags?: { [key: string]: string }; +} + +export namespace CreateSceneRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateSceneRequest): any => ({ + ...obj, + }); +} + +export interface CreateSceneResponse { + /** + *
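A sketch of filling in `CreateSceneRequest` as defined above; every ID, the content location, and the capability string are placeholders.

```ts
import type { CreateSceneRequest } from "@aws-sdk/client-iottwinmaker";

const request: CreateSceneRequest = {
  workspaceId: "my-workspace",  // placeholder
  sceneId: "factory-floor",     // placeholder
  // Placeholder location of the scene's content definition file.
  contentLocation: "s3://my-twinmaker-bucket/scenes/factory-floor.json",
  description: "Factory floor digital twin scene",
  capabilities: ["MATTERPORT"], // placeholder capability string
  tags: { env: "dev" },
};
```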

                              The ARN of the scene.

                              + */ + arn: string | undefined; + + /** + *

                              The date and time when the scene was created.

                              + */ + creationDateTime: Date | undefined; +} + +export namespace CreateSceneResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateSceneResponse): any => ({ + ...obj, + }); +} + +export interface CreateWorkspaceRequest { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The description of the workspace.

                              + */ + description?: string; + + /** + *

                              The ARN of the S3 bucket where resources associated with the workspace are stored.

                              + */ + s3Location: string | undefined; + + /** + *

                              The ARN of the execution role associated with the workspace.

                              + */ + role: string | undefined; + + /** + *

                              Metadata that you can use to manage the workspace.

                              + */ + tags?: { [key: string]: string }; +} + +export namespace CreateWorkspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateWorkspaceRequest): any => ({ + ...obj, + }); +} + +export interface CreateWorkspaceResponse { + /** + *
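A sketch of sending `CreateWorkspaceRequest`, assuming `CreateWorkspaceCommand` wraps this request shape the same way the other commands in this patch wrap their `*Request` models; the ARNs are placeholders.

```ts
import { CreateWorkspaceCommand, IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

const { arn, creationDateTime } = await client.send(
  new CreateWorkspaceCommand({
    workspaceId: "my-workspace",
    description: "Workspace for the plant digital twin",
    // ARNs below are placeholders.
    s3Location: "arn:aws:s3:::my-twinmaker-bucket",
    role: "arn:aws:iam::123456789012:role/MyTwinMakerExecutionRole",
    tags: { env: "dev" },
  })
);
console.log(arn, creationDateTime);
```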

                              The ARN of the workspace.

                              + */ + arn: string | undefined; + + /** + *

                              The date and time when the workspace was created.

                              + */ + creationDateTime: Date | undefined; +} + +export namespace CreateWorkspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateWorkspaceResponse): any => ({ + ...obj, + }); +} + +export interface DeleteComponentTypeRequest { + /** + *

                              The ID of the workspace that contains the component type.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the component type to delete.

                              + */ + componentTypeId: string | undefined; +} + +export namespace DeleteComponentTypeRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteComponentTypeRequest): any => ({ + ...obj, + }); +} + +export interface DeleteComponentTypeResponse { + /** + *

                              The current state of the component type to be deleted.

                              + */ + state: State | string | undefined; +} + +export namespace DeleteComponentTypeResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteComponentTypeResponse): any => ({ + ...obj, + }); +} + +export interface DeleteEntityRequest { + /** + *

                              The ID of the workspace that contains the entity to delete.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the entity to delete.

                              + */ + entityId: string | undefined; + + /** + *

                              A Boolean value that specifies whether the operation deletes child entities.

                              + */ + isRecursive?: boolean; +} + +export namespace DeleteEntityRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEntityRequest): any => ({ + ...obj, + }); +} + +export interface DeleteEntityResponse { + /** + *

                              The current state of the deleted entity.

                              + */ + state: State | string | undefined; +} + +export namespace DeleteEntityResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteEntityResponse): any => ({ + ...obj, + }); +} + +export interface DeleteSceneRequest { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the scene to delete.

                              + */ + sceneId: string | undefined; +} + +export namespace DeleteSceneRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteSceneRequest): any => ({ + ...obj, + }); +} + +export interface DeleteSceneResponse {} + +export namespace DeleteSceneResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteSceneResponse): any => ({ + ...obj, + }); +} + +export interface DeleteWorkspaceRequest { + /** + *

                              The ID of the workspace to delete.

                              + */ + workspaceId: string | undefined; +} + +export namespace DeleteWorkspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteWorkspaceRequest): any => ({ + ...obj, + }); +} + +export interface DeleteWorkspaceResponse {} + +export namespace DeleteWorkspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteWorkspaceResponse): any => ({ + ...obj, + }); +} + +export interface GetComponentTypeRequest { + /** + *

                              The ID of the workspace that contains the component type.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId: string | undefined; +} + +export namespace GetComponentTypeRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetComponentTypeRequest): any => ({ + ...obj, + }); +} + +/** + *

                              The function response.

                              + */ +export interface FunctionResponse { + /** + *

                              The required properties of the function.

                              + */ + requiredProperties?: string[]; + + /** + *

                              The scope of the function.

                              + */ + scope?: Scope | string; + + /** + *

                              The data connector.

                              + */ + implementedBy?: DataConnector; + + /** + *

                              Indicates whether this function is inherited.

                              + */ + isInherited?: boolean; +} + +export namespace FunctionResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FunctionResponse): any => ({ + ...obj, + }); +} + +export enum ErrorCode { + INTERNAL_FAILURE = "INTERNAL_FAILURE", + VALIDATION_ERROR = "VALIDATION_ERROR", +} + +/** + *

                              The error details.

                              + */ +export interface ErrorDetails { + /** + *

                              The error code.

                              + */ + code?: ErrorCode | string; + + /** + *

                              The error message.

                              + */ + message?: string; +} + +export namespace ErrorDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ErrorDetails): any => ({ + ...obj, + }); +} + +/** + *

                              An object that represents the status of an entity, component, component type, or workspace.

                              + */ +export interface Status { + /** + *

                              The current state of the entity, component, component type, or workspace.

                              + */ + state?: State | string; + + /** + *

                              The error details.

                              + */ + error?: ErrorDetails; +} + +export namespace Status { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Status): any => ({ + ...obj, + }); +} + +export interface GetEntityRequest { + /** + *
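Because `Status.state` is typed as `State | string`, a small helper that narrows it and surfaces `ErrorDetails` can be useful. A sketch using only the shapes defined above:

```ts
import { ErrorDetails, State, Status } from "@aws-sdk/client-iottwinmaker";

// Logs a readable summary of a Status returned by a Get* call.
function describeStatus(status?: Status): void {
  if (!status) return;
  if (status.state === State.ERROR) {
    const err: ErrorDetails | undefined = status.error;
    console.error(`Resource in ERROR state: ${err?.code ?? "UNKNOWN"} - ${err?.message ?? ""}`);
  } else {
    console.log(`Resource state: ${status.state}`);
  }
}
```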

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the entity.

                              + */ + entityId: string | undefined; +} + +export namespace GetEntityRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEntityRequest): any => ({ + ...obj, + }); +} + +/** + *

                              The connector failed.

                              + */ +export interface ConnectorFailureException extends __SmithyException, $MetadataBearer { + name: "ConnectorFailureException"; + $fault: "client"; + message?: string; +} + +export namespace ConnectorFailureException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConnectorFailureException): any => ({ + ...obj, + }); +} + +/** + *

                              The connector timed out.

                              + */ +export interface ConnectorTimeoutException extends __SmithyException, $MetadataBearer { + name: "ConnectorTimeoutException"; + $fault: "client"; + message?: string; +} + +export namespace ConnectorTimeoutException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConnectorTimeoutException): any => ({ + ...obj, + }); +} + +export interface GetPropertyValueRequest { + /** + *

                              The name of the component whose property values the operation returns.

                              + */ + componentName?: string; + + /** + *

                              The ID of the component type whose property values the operation returns.

                              + */ + componentTypeId?: string; + + /** + *

                              The ID of the entity whose property values the operation returns.

                              + */ + entityId?: string; + + /** + *

                              The properties whose values the operation returns.

                              + */ + selectedProperties: string[] | undefined; + + /** + *

                              The ID of the workspace whose property values the operation returns.

                              + */ + workspaceId: string | undefined; +} + +export namespace GetPropertyValueRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPropertyValueRequest): any => ({ + ...obj, + }); +} + +export enum InterpolationType { + LINEAR = "LINEAR", +} + +/** + *
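A sketch of `GetPropertyValueRequest` as defined above, selecting two properties on one component; the names are placeholders and the response shape is not shown in this excerpt.

```ts
import { GetPropertyValueCommand, IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

const response = await client.send(
  new GetPropertyValueCommand({
    workspaceId: "my-workspace",
    entityId: "my-entity-id",
    componentName: "TemperatureSensor",          // placeholder component
    selectedProperties: ["temperature", "unit"], // placeholder property names
  })
);
console.log(JSON.stringify(response, null, 2));
```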

                              An object that specifies how to interpolate data in a list.

                              + */ +export interface InterpolationParameters { + /** + *

                              The interpolation type.

                              + */ + interpolationType?: InterpolationType | string; + + /** + *

                              The interpolation time interval in seconds.

                              + */ + intervalInSeconds?: number; +} + +export namespace InterpolationParameters { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InterpolationParameters): any => ({ + ...obj, + }); +} + +export enum OrderByTime { + ASCENDING = "ASCENDING", + DESCENDING = "DESCENDING", +} + +export interface GetSceneRequest { + /** + *
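The interpolation settings above are plain data. A brief sketch; where they are attached (presumably a property-value-history request, which this excerpt does not show) is an assumption.

```ts
import { InterpolationParameters, InterpolationType, OrderByTime } from "@aws-sdk/client-iottwinmaker";

// Linearly interpolate returned values onto a 60-second grid.
const interpolation: InterpolationParameters = {
  interpolationType: InterpolationType.LINEAR,
  intervalInSeconds: 60,
};

// Sort order for time-series style requests (usage not shown in this excerpt).
const order: OrderByTime = OrderByTime.ASCENDING;
```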

                              The ID of the workspace that contains the scene.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the scene.

                              + */ + sceneId: string | undefined; +} + +export namespace GetSceneRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetSceneRequest): any => ({ + ...obj, + }); +} + +export interface GetSceneResponse { + /** + *

                              The ID of the workspace that contains the scene.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the scene.

                              + */ + sceneId: string | undefined; + + /** + *

                              The relative path that specifies the location of the content definition file.

                              + */ + contentLocation: string | undefined; + + /** + *

                              The ARN of the scene.

                              + */ + arn: string | undefined; + + /** + *

                              The date and time when the scene was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The date and time when the scene was last updated.

                              + */ + updateDateTime: Date | undefined; + + /** + *

                              The description of the scene.

                              + */ + description?: string; + + /** + *

                              A list of capabilities that the scene uses to render itself.

                              + */ + capabilities?: string[]; +} + +export namespace GetSceneResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetSceneResponse): any => ({ + ...obj, + }); +} + +export interface GetWorkspaceRequest { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; +} + +export namespace GetWorkspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetWorkspaceRequest): any => ({ + ...obj, + }); +} + +export interface GetWorkspaceResponse { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ARN of the workspace.

                              + */ + arn: string | undefined; + + /** + *

                              The description of the workspace.

                              + */ + description?: string; + + /** + *

                              The ARN of the S3 bucket where resources associated with the workspace are stored.

                              + */ + s3Location: string | undefined; + + /** + *

                              The ARN of the execution role associated with the workspace.

                              + */ + role: string | undefined; + + /** + *

                              The date and time when the workspace was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The date and time when the workspace was last updated.

                              + */ + updateDateTime: Date | undefined; +} + +export namespace GetWorkspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetWorkspaceResponse): any => ({ + ...obj, + }); +} + +/** + *

                              An object that filters items in a list of component types.

                              + */ +export type ListComponentTypesFilter = + | ListComponentTypesFilter.ExtendsFromMember + | ListComponentTypesFilter.IsAbstractMember + | ListComponentTypesFilter.NamespaceMember + | ListComponentTypesFilter.$UnknownMember; + +export namespace ListComponentTypesFilter { + /** + *

                              The component type that the component types in the list extend.

                              + */ + export interface ExtendsFromMember { + extendsFrom: string; + namespace?: never; + isAbstract?: never; + $unknown?: never; + } + + /** + *

                              The namespace to which the component types in the list belong.

                              + */ + export interface NamespaceMember { + extendsFrom?: never; + namespace: string; + isAbstract?: never; + $unknown?: never; + } + + /** + *

                              A Boolean value that specifies whether the component types in the list are abstract.

                              + */ + export interface IsAbstractMember { + extendsFrom?: never; + namespace?: never; + isAbstract: boolean; + $unknown?: never; + } + + export interface $UnknownMember { + extendsFrom?: never; + namespace?: never; + isAbstract?: never; + $unknown: [string, any]; + } + + export interface Visitor { + extendsFrom: (value: string) => T; + namespace: (value: string) => T; + isAbstract: (value: boolean) => T; + _: (name: string, value: any) => T; + } + + export const visit = (value: ListComponentTypesFilter, visitor: Visitor): T => { + if (value.extendsFrom !== undefined) return visitor.extendsFrom(value.extendsFrom); + if (value.namespace !== undefined) return visitor.namespace(value.namespace); + if (value.isAbstract !== undefined) return visitor.isAbstract(value.isAbstract); + return visitor._(value.$unknown[0], value.$unknown[1]); + }; + + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListComponentTypesFilter): any => { + if (obj.extendsFrom !== undefined) return { extendsFrom: obj.extendsFrom }; + if (obj.namespace !== undefined) return { namespace: obj.namespace }; + if (obj.isAbstract !== undefined) return { isAbstract: obj.isAbstract }; + if (obj.$unknown !== undefined) return { [obj.$unknown[0]]: "UNKNOWN" }; + }; +} + +export interface ListComponentTypesRequest { + /** + *
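`ListComponentTypesFilter` is a tagged union with exactly one member set, and the generated `visit` helper dispatches on whichever member is present. A sketch, assuming `visit` is generic over the visitor's return type as these generated unions typically are:

```ts
import { ListComponentTypesFilter } from "@aws-sdk/client-iottwinmaker";

// Exactly one union member may be set.
const filter: ListComponentTypesFilter = { extendsFrom: "com.example.baseSensor" }; // placeholder ID

const description: string = ListComponentTypesFilter.visit(filter, {
  extendsFrom: (value) => `component types extending ${value}`,
  namespace: (value) => `component types in namespace ${value}`,
  isAbstract: (value) => (value ? "abstract component types" : "concrete component types"),
  _: (name, value) => `unknown filter ${name}=${JSON.stringify(value)}`,
});
console.log(description);
```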

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              A list of objects that filter the request.

                              + */ + filters?: ListComponentTypesFilter[]; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; + + /** + *

                              The maximum number of results to display.

                              + */ + maxResults?: number; +} + +export namespace ListComponentTypesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListComponentTypesRequest): any => ({ + ...obj, + ...(obj.filters && { filters: obj.filters.map((item) => ListComponentTypesFilter.filterSensitiveLog(item)) }), + }); +} + +/** + *

                              An object that contains information about a component type.

                              + */ +export interface ComponentTypeSummary { + /** + *

                              The ARN of the component type.

                              + */ + arn: string | undefined; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId: string | undefined; + + /** + *

                              The date and time when the component type was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The date and time when the component type was last updated.

                              + */ + updateDateTime: Date | undefined; + + /** + *

                              The description of the component type.

                              + */ + description?: string; + + /** + *

                              The current status of the component type.

                              + */ + status?: Status; +} + +export namespace ComponentTypeSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ComponentTypeSummary): any => ({ + ...obj, + }); +} + +export interface ListComponentTypesResponse { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              A list of objects that contain information about the component types.

                              + */ + componentTypeSummaries: ComponentTypeSummary[] | undefined; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; + + /** + *

                              Specifies the maximum number of results to display.

                              + */ + maxResults?: number; +} + +export namespace ListComponentTypesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListComponentTypesResponse): any => ({ + ...obj, + }); +} + +/** + *

                              An object that filters items in a list of entities.

                              + */ +export type ListEntitiesFilter = + | ListEntitiesFilter.ComponentTypeIdMember + | ListEntitiesFilter.ParentEntityIdMember + | ListEntitiesFilter.$UnknownMember; + +export namespace ListEntitiesFilter { + /** + *

                              The parent of the entities in the list.

                              + */ + export interface ParentEntityIdMember { + parentEntityId: string; + componentTypeId?: never; + $unknown?: never; + } + + /** + *

                              The ID of the component type of the entities in the list.

                              + */ + export interface ComponentTypeIdMember { + parentEntityId?: never; + componentTypeId: string; + $unknown?: never; + } + + export interface $UnknownMember { + parentEntityId?: never; + componentTypeId?: never; + $unknown: [string, any]; + } + + export interface Visitor { + parentEntityId: (value: string) => T; + componentTypeId: (value: string) => T; + _: (name: string, value: any) => T; + } + + export const visit = (value: ListEntitiesFilter, visitor: Visitor): T => { + if (value.parentEntityId !== undefined) return visitor.parentEntityId(value.parentEntityId); + if (value.componentTypeId !== undefined) return visitor.componentTypeId(value.componentTypeId); + return visitor._(value.$unknown[0], value.$unknown[1]); + }; + + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListEntitiesFilter): any => { + if (obj.parentEntityId !== undefined) return { parentEntityId: obj.parentEntityId }; + if (obj.componentTypeId !== undefined) return { componentTypeId: obj.componentTypeId }; + if (obj.$unknown !== undefined) return { [obj.$unknown[0]]: "UNKNOWN" }; + }; +} + +export interface ListEntitiesRequest { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              A list of objects that filter the request.

                              + */ + filters?: ListEntitiesFilter[]; + + /** + *

                              The maximum number of results to display.

                              + */ + maxResults?: number; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace ListEntitiesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListEntitiesRequest): any => ({ + ...obj, + ...(obj.filters && { filters: obj.filters.map((item) => ListEntitiesFilter.filterSensitiveLog(item)) }), + }); +} + +/** + *

                              An object that contains information about an entity.

                              + */ +export interface EntitySummary { + /** + *

                              The ID of the entity.

                              + */ + entityId: string | undefined; + + /** + *

                              The name of the entity.

                              + */ + entityName: string | undefined; + + /** + *

                              The ARN of the entity.

                              + */ + arn: string | undefined; + + /** + *

                              The ID of the parent entity.

                              + */ + parentEntityId?: string; + + /** + *

                              The current status of the entity.

                              + */ + status: Status | undefined; + + /** + *

                              The description of the entity.

                              + */ + description?: string; + + /** + *

                              A Boolean value that specifies whether the entity has child entities.

                              + */ + hasChildEntities?: boolean; + + /** + *

                              The date and time when the entity was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The date and time when the entity was last updated.

                              + */ + updateDateTime: Date | undefined; +} + +export namespace EntitySummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EntitySummary): any => ({ + ...obj, + }); +} + +export interface ListEntitiesResponse { + /** + *

                              A list of objects that contain information about the entities.

                              + */ + entitySummaries?: EntitySummary[]; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace ListEntitiesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListEntitiesResponse): any => ({ + ...obj, + }); +} + +export interface ListScenesRequest { + /** + *
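`ListEntitiesRequest` and `ListEntitiesResponse` above follow the usual `nextToken`/`maxResults` paging contract, and the filters reuse the one-member-set union pattern. A manual paging sketch with placeholder IDs (a generated paginator is also exported from `./pagination`, but its name is not shown here):

```ts
import { EntitySummary, IoTTwinMakerClient, ListEntitiesCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

const entities: EntitySummary[] = [];
let nextToken: string | undefined = undefined;

do {
  const page = await client.send(
    new ListEntitiesCommand({
      workspaceId: "my-workspace",                 // placeholder
      filters: [{ parentEntityId: "building-1" }], // only entities under this parent (placeholder ID)
      maxResults: 50,
      nextToken,
    })
  );
  entities.push(...(page.entitySummaries ?? []));
  nextToken = page.nextToken;
} while (nextToken);

console.log(`Fetched ${entities.length} entities.`);
```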

                              The ID of the workspace that contains the scenes.

                              + */ + workspaceId: string | undefined; + + /** + *

                              Specifies the maximum number of results to display.

                              + */ + maxResults?: number; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace ListScenesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListScenesRequest): any => ({ + ...obj, + }); +} + +/** + *

                              An object that contains information about a scene.

                              + */ +export interface SceneSummary { + /** + *

                              The ID of the scene.

                              + */ + sceneId: string | undefined; + + /** + *

                              The relative path that specifies the location of the content definition file.

                              + */ + contentLocation: string | undefined; + + /** + *

                              The ARN of the scene.

                              + */ + arn: string | undefined; + + /** + *

                              The date and time when the scene was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The date and time when the scene was last updated.

                              + */ + updateDateTime: Date | undefined; + + /** + *

                              The scene description.

                              + */ + description?: string; +} + +export namespace SceneSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: SceneSummary): any => ({ + ...obj, + }); +} + +export interface ListScenesResponse { + /** + *

                              A list of objects that contain information about the scenes.

                              + */ + sceneSummaries?: SceneSummary[]; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace ListScenesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListScenesResponse): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceRequest { + /** + *

                              The ARN of the resource.

                              + */ + resourceARN: string | undefined; + + /** + *

                              The maximum number of results to display.

                              + */ + maxResults?: number; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace ListTagsForResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceRequest): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceResponse { + /** + *

                              Metadata that you can use to manage a resource.

                              + */ + tags?: { [key: string]: string }; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace ListTagsForResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceResponse): any => ({ + ...obj, + }); +} + +export interface ListWorkspacesRequest { + /** + *

                              The maximum number of results to display.

                              + */ + maxResults?: number; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace ListWorkspacesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListWorkspacesRequest): any => ({ + ...obj, + }); +} + +/** + *

                              An object that contains information about a workspace.

                              + */ +export interface WorkspaceSummary { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ARN of the workspace.

                              + */ + arn: string | undefined; + + /** + *

                              The description of the workspace.

                              + */ + description?: string; + + /** + *

                              The date and time when the workspace was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The date and time when the workspace was last updated.

                              + */ + updateDateTime: Date | undefined; +} + +export namespace WorkspaceSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: WorkspaceSummary): any => ({ + ...obj, + }); +} + +export interface ListWorkspacesResponse { + /** + *

                              A list of objects that contain information about the workspaces.

                              + */ + workspaceSummaries?: WorkspaceSummary[]; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace ListWorkspacesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListWorkspacesResponse): any => ({ + ...obj, + }); +} + +export interface TagResourceRequest { + /** + *

                              The ARN of the resource.

                              + */ + resourceARN: string | undefined; + + /** + *

                              Metadata to add to this resource.

                              + */ + tags: { [key: string]: string } | undefined; +} + +export namespace TagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ + ...obj, + }); +} + +export interface TagResourceResponse {} + +export namespace TagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ + ...obj, + }); +} + +/** + *

                              The number of tags exceeds the limit.
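A hedged sketch of how a caller might surface this error when tagging a resource; the package name "@aws-sdk/client-iottwinmaker" is an assumption, and the error is matched by the name declared just below.

import { IoTTwinMakerClient, TagResourceCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

async function tagResource(resourceARN: string): Promise<void> {
  try {
    await client.send(new TagResourceCommand({ resourceARN, tags: { env: "dev" } }));
  } catch (error: any) {
    if (error?.name === "TooManyTagsException") {
      // The resource already carries the maximum number of tags; remove some before retrying.
      console.warn(`Tag limit reached for ${resourceARN}`);
      return;
    }
    throw error;
  }
}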

                              + */ +export interface TooManyTagsException extends __SmithyException, $MetadataBearer { + name: "TooManyTagsException"; + $fault: "client"; + message?: string; +} + +export namespace TooManyTagsException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TooManyTagsException): any => ({ + ...obj, + }); +} + +export interface UntagResourceRequest { + /** + *

                              The ARN of the resource.

                              + */ + resourceARN: string | undefined; + + /** + *

                              A list of tag key names to remove from the resource. You don't specify the value. Both the key and its associated value are removed.

                              + */ + tagKeys: string[] | undefined; +} + +export namespace UntagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ + ...obj, + }); +} + +export interface UntagResourceResponse {} + +export namespace UntagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UpdateComponentTypeResponse { + /** + *

                              The ID of the workspace that contains the component type.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ARN of the component type.

                              + */ + arn: string | undefined; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId: string | undefined; + + /** + *

                              The current state of the component type.

                              + */ + state: State | string | undefined; +} + +export namespace UpdateComponentTypeResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateComponentTypeResponse): any => ({ + ...obj, + }); +} + +export enum ComponentUpdateType { + CREATE = "CREATE", + DELETE = "DELETE", + UPDATE = "UPDATE", +} + +export enum ParentEntityUpdateType { + DELETE = "DELETE", + UPDATE = "UPDATE", +} + +/** + *

                              The parent entity update request.

                              + */ +export interface ParentEntityUpdateRequest { + /** + *

                              The type of the update.

                              + */ + updateType: ParentEntityUpdateType | string | undefined; + + /** + *

                              The ID of the parent entity.

                              + */ + parentEntityId?: string; +} + +export namespace ParentEntityUpdateRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ParentEntityUpdateRequest): any => ({ + ...obj, + }); +} + +export interface UpdateEntityResponse { + /** + *

                              The date and time when the entity was last updated.

                              + */ + updateDateTime: Date | undefined; + + /** + *

                              The current state of the entity update.

                              + */ + state: State | string | undefined; +} + +export namespace UpdateEntityResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEntityResponse): any => ({ + ...obj, + }); +} + +export interface UpdateSceneRequest { + /** + *

                              The ID of the workspace that contains the scene.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the scene.

                              + */ + sceneId: string | undefined; + + /** + *

                              The relative path that specifies the location of the content definition file.

                              + */ + contentLocation?: string; + + /** + *

                              The description of this scene.

                              + */ + description?: string; + + /** + *

                              A list of capabilities that the scene uses to render.

                              + */ + capabilities?: string[]; +} + +export namespace UpdateSceneRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateSceneRequest): any => ({ + ...obj, + }); +} + +export interface UpdateSceneResponse { + /** + *

                              The date and time when the scene was last updated.

                              + */ + updateDateTime: Date | undefined; +} + +export namespace UpdateSceneResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateSceneResponse): any => ({ + ...obj, + }); +} + +export interface UpdateWorkspaceRequest { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The description of the workspace.

                              + */ + description?: string; + + /** + *

                              The ARN of the execution role associated with the workspace.

                              + */ + role?: string; +} + +export namespace UpdateWorkspaceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateWorkspaceRequest): any => ({ + ...obj, + }); +} + +export interface UpdateWorkspaceResponse { + /** + *

                              The date and time of the current update.

                              + */ + updateDateTime: Date | undefined; +} + +export namespace UpdateWorkspaceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateWorkspaceResponse): any => ({ + ...obj, + }); +} + +/** + *

                              An object that specifies a value for a property.
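A minimal sketch of what such a value can look like, using only the fields declared below; the import path "@aws-sdk/client-iottwinmaker" is an assumption about the published package name.

import { DataValue } from "@aws-sdk/client-iottwinmaker";

// A map value whose entries are themselves DataValue objects, including a nested list.
const alarmValue: DataValue = {
  mapValue: {
    state: { stringValue: "ACTIVE" },
    threshold: { doubleValue: 98.6 },
    acknowledged: { booleanValue: false },
    recentReadings: { listValue: [{ doubleValue: 97.1 }, { doubleValue: 99.2 }] },
  },
};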

                              + */ +export interface DataValue { + /** + *

                              A Boolean value.

                              + */ + booleanValue?: boolean; + + /** + *

                              A double value.

                              + */ + doubleValue?: number; + + /** + *

                              An integer value.

                              + */ + integerValue?: number; + + /** + *

                              A long value.

                              + */ + longValue?: number; + + /** + *

                              A string value.

                              + */ + stringValue?: string; + + /** + *

                              A list of multiple values.

                              + */ + listValue?: DataValue[]; + + /** + *

                              An object that maps strings to multiple DataValue objects.

                              + */ + mapValue?: { [key: string]: DataValue }; + + /** + *

                              A value that relates a component to another component.

                              + */ + relationshipValue?: RelationshipValue; + + /** + *

                              An expression that produces the value.

                              + */ + expression?: string; +} + +export namespace DataValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DataValue): any => ({ + ...obj, + }); +} + +/** + *

                              An object that filters items returned by a property request.

                              + */ +export interface PropertyFilter { + /** + *

                              The property name associated with this property filter.

                              + */ + propertyName?: string; + + /** + *

                              The operator associated with this property filter.

                              + */ + operator?: string; + + /** + *

                              The value associated with this property filter.

                              + */ + value?: DataValue; +} + +export namespace PropertyFilter { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyFilter): any => ({ + ...obj, + }); +} + +/** + *

                              The latest value of the property.

                              + */ +export interface PropertyLatestValue { + /** + *

An object that specifies information about a property.

                              + */ + propertyReference: EntityPropertyReference | undefined; + + /** + *

                              The value of the property.

                              + */ + propertyValue?: DataValue; +} + +export namespace PropertyLatestValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyLatestValue): any => ({ + ...obj, + }); +} + +/** + *

                              An object that contains information about a value for a time series property.

                              + */ +export interface PropertyValue { + /** + *

                              The timestamp of a value for a time series property.

                              + */ + timestamp: Date | undefined; + + /** + *

                              An object that specifies a value for a time series property.

                              + */ + value: DataValue | undefined; +} + +export namespace PropertyValue { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyValue): any => ({ + ...obj, + }); +} + +/** + *

                              An object that specifies the data type of a property.

                              + */ +export interface DataType { + /** + *

                              The underlying type of the data type.

                              + */ + type: Type | string | undefined; + + /** + *

                              The nested type in the data type.

                              + */ + nestedType?: DataType; + + /** + *

                              The allowed values for this data type.

                              + */ + allowedValues?: DataValue[]; + + /** + *

                              The unit of measure used in this data type.

                              + */ + unitOfMeasure?: string; + + /** + *

                              A relationship that associates a component with another component.
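A short sketch of a composite data type built from these fields: a list of strings restricted to two allowed values. The "LIST" and "STRING" literals are assumptions about the Type enum defined earlier in this file; since the field accepts Type | string, plain strings type-check.

import { DataType } from "@aws-sdk/client-iottwinmaker";

const alarmStateType: DataType = {
  type: "LIST",
  nestedType: {
    type: "STRING",
    allowedValues: [{ stringValue: "ACTIVE" }, { stringValue: "CLEARED" }],
  },
};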

                              + */ + relationship?: Relationship; +} + +export namespace DataType { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DataType): any => ({ + ...obj, + }); +} + +export interface GetPropertyValueHistoryRequest { + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the entity.

                              + */ + entityId?: string; + + /** + *

                              The name of the component.

                              + */ + componentName?: string; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId?: string; + + /** + *

                              A list of properties whose value histories the request retrieves.

                              + */ + selectedProperties: string[] | undefined; + + /** + *

                              A list of objects that filter the property value history request.

                              + */ + propertyFilters?: PropertyFilter[]; + + /** + *

                              The date and time of the earliest property value to return.

                              + */ + startDateTime: Date | undefined; + + /** + *

                              The date and time of the latest property value to return.

                              + */ + endDateTime: Date | undefined; + + /** + *

                              An object that specifies the interpolation type and the interval over which to interpolate data.

                              + */ + interpolation?: InterpolationParameters; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; + + /** + *

                              The maximum number of results to return.

                              + */ + maxResults?: number; + + /** + *

                              The time direction to use in the result order.
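A hedged sketch of a history query driven by the paginator that this patch adds; the package name and the root re-export of paginateGetPropertyValueHistory are assumptions, and the component and property names are hypothetical.

import { IoTTwinMakerClient, paginateGetPropertyValueHistory } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

async function printLastDayOfTemperatures(workspaceId: string, entityId: string): Promise<void> {
  const input = {
    workspaceId,
    entityId,
    componentName: "thermostat",          // hypothetical component name
    selectedProperties: ["temperature"],  // hypothetical property name
    startDateTime: new Date(Date.now() - 24 * 60 * 60 * 1000),
    endDateTime: new Date(),
  };
  for await (const page of paginateGetPropertyValueHistory({ client, pageSize: 100 }, input)) {
    for (const history of page.propertyValues ?? []) {
      for (const point of history.values ?? []) {
        console.log(point.timestamp, point.value?.doubleValue);
      }
    }
  }
}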

                              + */ + orderByTime?: OrderByTime | string; +} + +export namespace GetPropertyValueHistoryRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPropertyValueHistoryRequest): any => ({ + ...obj, + }); +} + +export interface GetPropertyValueResponse { + /** + *

An object that maps strings to the properties and latest property values in the response. Each string in the mapping must be unique to this object.

                              + */ + propertyValues: { [key: string]: PropertyLatestValue } | undefined; +} + +export namespace GetPropertyValueResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPropertyValueResponse): any => ({ + ...obj, + }); +} + +/** + *

                              An object that specifies information about time series property values.

                              + */ +export interface PropertyValueEntry { + /** + *

                              An object that contains information about the entity that has the property.

                              + */ + entityPropertyReference: EntityPropertyReference | undefined; + + /** + *

                              A list of objects that specify time series property values.

                              + */ + propertyValues?: PropertyValue[]; +} + +export namespace PropertyValueEntry { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyValueEntry): any => ({ + ...obj, + }); +} + +/** + *

                              The history of values for a time series property.

                              + */ +export interface PropertyValueHistory { + /** + *

                              An object that uniquely identifies an entity property.

                              + */ + entityPropertyReference: EntityPropertyReference | undefined; + + /** + *

                              A list of objects that contain information about the values in the history of a time series property.

                              + */ + values?: PropertyValue[]; +} + +export namespace PropertyValueHistory { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyValueHistory): any => ({ + ...obj, + }); +} + +/** + *

                              An error returned by the BatchPutProperty action.

                              + */ +export interface BatchPutPropertyError { + /** + *

                              The error code.

                              + */ + errorCode: string | undefined; + + /** + *

                              The error message.

                              + */ + errorMessage: string | undefined; + + /** + *

                              An object that contains information about errors returned by the BatchPutProperty action.

                              + */ + entry: PropertyValueEntry | undefined; +} + +export namespace BatchPutPropertyError { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchPutPropertyError): any => ({ + ...obj, + }); +} + +export interface BatchPutPropertyValuesRequest { + /** + *

                              The ID of the workspace that contains the properties to set.

                              + */ + workspaceId: string | undefined; + + /** + *

An object that maps strings to the property value entries to set. Each string in the mapping must be unique to this object.
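A minimal sketch of a batch write built from PropertyValueEntry and PropertyValue objects; the package name is assumed, and the entityPropertyReference field names are taken from the EntityPropertyReference shape defined earlier in this file.

import { IoTTwinMakerClient, BatchPutPropertyValuesCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

async function recordTemperatures(workspaceId: string, entityId: string): Promise<void> {
  const response = await client.send(
    new BatchPutPropertyValuesCommand({
      workspaceId,
      entries: [
        {
          entityPropertyReference: { entityId, componentName: "thermostat", propertyName: "temperature" },
          propertyValues: [
            { timestamp: new Date("2021-11-30T12:00:00Z"), value: { doubleValue: 21.5 } },
            { timestamp: new Date("2021-11-30T12:05:00Z"), value: { doubleValue: 21.8 } },
          ],
        },
      ],
    })
  );
  // Failures are reported per entry instead of failing the whole request.
  for (const entry of response.errorEntries ?? []) {
    for (const err of entry.errors ?? []) {
      console.error(err.errorCode, err.errorMessage);
    }
  }
}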

                              + */ + entries: PropertyValueEntry[] | undefined; +} + +export namespace BatchPutPropertyValuesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchPutPropertyValuesRequest): any => ({ + ...obj, + }); +} + +export interface GetPropertyValueHistoryResponse { + /** + *

A list of objects that contain information about the historical values of the requested properties.

                              + */ + propertyValues: PropertyValueHistory[] | undefined; + + /** + *

                              The string that specifies the next page of results.

                              + */ + nextToken?: string; +} + +export namespace GetPropertyValueHistoryResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPropertyValueHistoryResponse): any => ({ + ...obj, + }); +} + +/** + *

                              An object that contains information about errors returned by the BatchPutProperty action.

                              + */ +export interface BatchPutPropertyErrorEntry { + /** + *

A list of objects that contain information about errors returned by the BatchPutProperty action.

                              + */ + errors: BatchPutPropertyError[] | undefined; +} + +export namespace BatchPutPropertyErrorEntry { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchPutPropertyErrorEntry): any => ({ + ...obj, + }); +} + +/** + *

                              An object that sets information about a property.

                              + */ +export interface PropertyDefinitionRequest { + /** + *

                              An object that contains information about the data type.

                              + */ + dataType?: DataType; + + /** + *

                              A Boolean value that specifies whether the property is required.

                              + */ + isRequiredInEntity?: boolean; + + /** + *

                              A Boolean value that specifies whether the property ID comes from an external data store.

                              + */ + isExternalId?: boolean; + + /** + *

                              A Boolean value that specifies whether the property is stored externally.

                              + */ + isStoredExternally?: boolean; + + /** + *

                              A Boolean value that specifies whether the property consists of time series data.

                              + */ + isTimeSeries?: boolean; + + /** + *

                              An object that contains the default value.

                              + */ + defaultValue?: DataValue; + + /** + *

A mapping that specifies configuration information about the property. Use this field to specify information that you read from and write to an external source.

                              + */ + configuration?: { [key: string]: string }; +} + +export namespace PropertyDefinitionRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyDefinitionRequest): any => ({ + ...obj, + }); +} + +/** + *

                              An object that contains response data from a property definition request.

                              + */ +export interface PropertyDefinitionResponse { + /** + *

                              An object that contains information about the data type.

                              + */ + dataType: DataType | undefined; + + /** + *

                              A Boolean value that specifies whether the property consists of time series data.

                              + */ + isTimeSeries: boolean | undefined; + + /** + *

                              A Boolean value that specifies whether the property is required in an entity.

                              + */ + isRequiredInEntity: boolean | undefined; + + /** + *

                              A Boolean value that specifies whether the property ID comes from an external data store.

                              + */ + isExternalId: boolean | undefined; + + /** + *

                              A Boolean value that specifies whether the property is stored externally.

                              + */ + isStoredExternally: boolean | undefined; + + /** + *

                              A Boolean value that specifies whether the property definition is imported from an external data store.

                              + */ + isImported: boolean | undefined; + + /** + *

                              A Boolean value that specifies whether the property definition can be updated.

                              + */ + isFinal: boolean | undefined; + + /** + *

                              A Boolean value that specifies whether the property definition is inherited from a parent entity.

                              + */ + isInherited: boolean | undefined; + + /** + *

                              An object that contains the default value.

                              + */ + defaultValue?: DataValue; + + /** + *

                              A mapping that specifies configuration information about the property.

                              + */ + configuration?: { [key: string]: string }; +} + +export namespace PropertyDefinitionResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyDefinitionResponse): any => ({ + ...obj, + }); +} + +export interface BatchPutPropertyValuesResponse { + /** + *

                              Entries that caused errors in the batch put operation.

                              + */ + errorEntries: BatchPutPropertyErrorEntry[] | undefined; +} + +export namespace BatchPutPropertyValuesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BatchPutPropertyValuesResponse): any => ({ + ...obj, + }); +} + +export interface CreateComponentTypeRequest { + /** + *

                              The ID of the workspace that contains the component type.

                              + */ + workspaceId: string | undefined; + + /** + *

A Boolean value that specifies whether an entity can have more than one component of this type.

                              + */ + isSingleton?: boolean; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId: string | undefined; + + /** + *

                              The description of the component type.

                              + */ + description?: string; + + /** + *

An object that maps strings to the property definitions in the component type. Each string in the mapping must be unique to this object.

                              + */ + propertyDefinitions?: { [key: string]: PropertyDefinitionRequest }; + + /** + *

                              Specifies the parent component type to extend.

                              + */ + extendsFrom?: string[]; + + /** + *

An object that maps strings to the functions in the component type. Each string in the mapping must be unique to this object.

                              + */ + functions?: { [key: string]: FunctionRequest }; + + /** + *

                              Metadata that you can use to manage the component type.
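A rough sketch of a CreateComponentType call that defines a single time series property; the package name, the "DOUBLE" type literal, and all IDs are assumptions for illustration.

import { IoTTwinMakerClient, CreateComponentTypeCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

async function defineThermostatType(workspaceId: string): Promise<void> {
  await client.send(
    new CreateComponentTypeCommand({
      workspaceId,
      componentTypeId: "com.example.thermostat",
      description: "Thermostat readings",
      isSingleton: true,
      propertyDefinitions: {
        temperature: {
          dataType: { type: "DOUBLE" }, // "DOUBLE" is an assumed Type enum value
          isTimeSeries: true,
          isRequiredInEntity: false,
        },
      },
      tags: { team: "facilities" },
    })
  );
}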

                              + */ + tags?: { [key: string]: string }; +} + +export namespace CreateComponentTypeRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateComponentTypeRequest): any => ({ + ...obj, + }); +} + +export interface GetComponentTypeResponse { + /** + *

                              The ID of the workspace that contains the component type.

                              + */ + workspaceId: string | undefined; + + /** + *

A Boolean value that specifies whether an entity can have more than one component of this type.

                              + */ + isSingleton?: boolean; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId: string | undefined; + + /** + *

                              The description of the component type.

                              + */ + description?: string; + + /** + *

An object that maps strings to the property definitions in the component type. Each string in the mapping must be unique to this object.

                              + */ + propertyDefinitions?: { [key: string]: PropertyDefinitionResponse }; + + /** + *

The names of the parent component types that this component type extends.

                              + */ + extendsFrom?: string[]; + + /** + *

An object that maps strings to the functions in the component type. Each string in the mapping must be unique to this object.

                              + */ + functions?: { [key: string]: FunctionResponse }; + + /** + *

                              The date and time when the component type was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

The date and time when the component type was last updated.

                              + */ + updateDateTime: Date | undefined; + + /** + *

                              The ARN of the component type.

                              + */ + arn: string | undefined; + + /** + *

                              A Boolean value that specifies whether the component type is abstract.

                              + */ + isAbstract?: boolean; + + /** + *

A Boolean value that specifies whether the component type has a schema initializer and that the schema initializer has run.

                              + */ + isSchemaInitialized?: boolean; + + /** + *

                              The current status of the component type.

                              + */ + status?: Status; +} + +export namespace GetComponentTypeResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetComponentTypeResponse): any => ({ + ...obj, + }); +} + +export interface UpdateComponentTypeRequest { + /** + *

                              The ID of the workspace that contains the component type.

                              + */ + workspaceId: string | undefined; + + /** + *

A Boolean value that specifies whether an entity can have more than one component of this type.

                              + */ + isSingleton?: boolean; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId: string | undefined; + + /** + *

                              The description of the component type.

                              + */ + description?: string; + + /** + *

An object that maps strings to the property definitions in the component type. Each string in the mapping must be unique to this object.

                              + */ + propertyDefinitions?: { [key: string]: PropertyDefinitionRequest }; + + /** + *

                              Specifies the component type that this component type extends.

                              + */ + extendsFrom?: string[]; + + /** + *

An object that maps strings to the functions in the component type. Each string in the mapping must be unique to this object.

                              + */ + functions?: { [key: string]: FunctionRequest }; +} + +export namespace UpdateComponentTypeRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateComponentTypeRequest): any => ({ + ...obj, + }); +} + +/** + *

                              An object that sets information about a property.

                              + */ +export interface PropertyRequest { + /** + *

                              An object that specifies information about a property.

                              + */ + definition?: PropertyDefinitionRequest; + + /** + *

                              The value of the property.

                              + */ + value?: DataValue; + + /** + *

                              The update type of the update property request.

                              + */ + updateType?: PropertyUpdateType | string; +} + +export namespace PropertyRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyRequest): any => ({ + ...obj, + }); +} + +/** + *

                              An object that contains information about a property response.

                              + */ +export interface PropertyResponse { + /** + *

                              An object that specifies information about a property.

                              + */ + definition?: PropertyDefinitionResponse; + + /** + *

                              The value of the property.

                              + */ + value?: DataValue; +} + +export namespace PropertyResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PropertyResponse): any => ({ + ...obj, + }); +} + +/** + *

                              An object that sets information about a component type create or update request.

                              + */ +export interface ComponentRequest { + /** + *

                              The description of the component request.

                              + */ + description?: string; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId?: string; + + /** + *

An object that maps strings to the properties to set in the component type. Each string in the mapping must be unique to this object.

                              + */ + properties?: { [key: string]: PropertyRequest }; +} + +export namespace ComponentRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ComponentRequest): any => ({ + ...obj, + }); +} + +/** + *

                              An object that returns information about a component type create or update request.

                              + */ +export interface ComponentResponse { + /** + *

                              The name of the component.

                              + */ + componentName?: string; + + /** + *

                              The description of the component type.

                              + */ + description?: string; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId?: string; + + /** + *

                              The status of the component type.

                              + */ + status?: Status; + + /** + *

                              The name of the property definition set in the request.

                              + */ + definedIn?: string; + + /** + *

An object that maps strings to the properties to set in the component type. Each string in the mapping must be unique to this object.

                              + */ + properties?: { [key: string]: PropertyResponse }; +} + +export namespace ComponentResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ComponentResponse): any => ({ + ...obj, + }); +} + +/** + *

                              The component update request.

                              + */ +export interface ComponentUpdateRequest { + /** + *

                              The update type of the component update request.

                              + */ + updateType?: ComponentUpdateType | string; + + /** + *

                              The description of the component type.

                              + */ + description?: string; + + /** + *

                              The ID of the component type.

                              + */ + componentTypeId?: string; + + /** + *

An object that maps strings to the properties to set in the component type update. Each string in the mapping must be unique to this object.

                              + */ + propertyUpdates?: { [key: string]: PropertyRequest }; +} + +export namespace ComponentUpdateRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ComponentUpdateRequest): any => ({ + ...obj, + }); +} + +export interface CreateEntityRequest { + /** + *

                              The ID of the workspace that contains the entity.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the entity.

                              + */ + entityId?: string; + + /** + *

                              The name of the entity.

                              + */ + entityName: string | undefined; + + /** + *

                              The description of the entity.

                              + */ + description?: string; + + /** + *

An object that maps strings to the components in the entity. Each string in the mapping must be unique to this object.

                              + */ + components?: { [key: string]: ComponentRequest }; + + /** + *

                              The ID of the entity's parent entity.

                              + */ + parentEntityId?: string; + + /** + *

                              Metadata that you can use to manage the entity.
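A hedged sketch of an entity creation request that attaches one component of a hypothetical component type and sets an initial property value; the package name and all IDs are assumptions.

import { IoTTwinMakerClient, CreateEntityCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

async function createRoomEntity(workspaceId: string, parentEntityId?: string): Promise<void> {
  await client.send(
    new CreateEntityCommand({
      workspaceId,
      entityName: "Room-101",
      description: "First-floor conference room",
      parentEntityId,
      components: {
        thermostat: {
          componentTypeId: "com.example.thermostat",
          properties: {
            temperature: { value: { doubleValue: 20.0 } },
          },
        },
      },
      tags: { building: "HQ" },
    })
  );
}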

                              + */ + tags?: { [key: string]: string }; +} + +export namespace CreateEntityRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateEntityRequest): any => ({ + ...obj, + }); +} + +export interface GetEntityResponse { + /** + *

                              The ID of the entity.

                              + */ + entityId: string | undefined; + + /** + *

                              The name of the entity.

                              + */ + entityName: string | undefined; + + /** + *

                              The ARN of the entity.

                              + */ + arn: string | undefined; + + /** + *

                              The current status of the entity.

                              + */ + status: Status | undefined; + + /** + *

                              The ID of the workspace.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The description of the entity.

                              + */ + description?: string; + + /** + *

An object that maps strings to the components in the entity. Each string in the mapping must be unique to this object.

                              + */ + components?: { [key: string]: ComponentResponse }; + + /** + *

                              The ID of the parent entity for this entity.

                              + */ + parentEntityId: string | undefined; + + /** + *

                              A Boolean value that specifies whether the entity has associated child entities.

                              + */ + hasChildEntities: boolean | undefined; + + /** + *

                              The date and time when the entity was created.

                              + */ + creationDateTime: Date | undefined; + + /** + *

                              The date and time when the entity was last updated.

                              + */ + updateDateTime: Date | undefined; +} + +export namespace GetEntityResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEntityResponse): any => ({ + ...obj, + }); +} + +export interface UpdateEntityRequest { + /** + *

                              The ID of the workspace that contains the entity.

                              + */ + workspaceId: string | undefined; + + /** + *

                              The ID of the entity.

                              + */ + entityId: string | undefined; + + /** + *

                              The name of the entity.

                              + */ + entityName?: string; + + /** + *

                              The description of the entity.

                              + */ + description?: string; + + /** + *

An object that maps strings to the component updates in the request. Each string in the mapping must be unique to this object.

                              + */ + componentUpdates?: { [key: string]: ComponentUpdateRequest }; + + /** + *

                              An object that describes the update request for a parent entity.
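A minimal sketch of an update that changes one component property and re-parents the entity, using the ComponentUpdateRequest and ParentEntityUpdateRequest shapes above; the package name and IDs are assumptions.

import { IoTTwinMakerClient, UpdateEntityCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

async function moveAndRetune(workspaceId: string, entityId: string, newParentId: string): Promise<void> {
  await client.send(
    new UpdateEntityCommand({
      workspaceId,
      entityId,
      componentUpdates: {
        thermostat: {
          updateType: "UPDATE", // ComponentUpdateType.UPDATE
          propertyUpdates: {
            temperature: { value: { doubleValue: 22.5 } },
          },
        },
      },
      parentEntityUpdate: { updateType: "UPDATE", parentEntityId: newParentId },
    })
  );
}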

                              + */ + parentEntityUpdate?: ParentEntityUpdateRequest; +} + +export namespace UpdateEntityRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateEntityRequest): any => ({ + ...obj, + }); +} diff --git a/clients/client-iottwinmaker/src/pagination/GetPropertyValueHistoryPaginator.ts b/clients/client-iottwinmaker/src/pagination/GetPropertyValueHistoryPaginator.ts new file mode 100644 index 000000000000..71d1efb8b065 --- /dev/null +++ b/clients/client-iottwinmaker/src/pagination/GetPropertyValueHistoryPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + GetPropertyValueHistoryCommand, + GetPropertyValueHistoryCommandInput, + GetPropertyValueHistoryCommandOutput, +} from "../commands/GetPropertyValueHistoryCommand"; +import { IoTTwinMaker } from "../IoTTwinMaker"; +import { IoTTwinMakerClient } from "../IoTTwinMakerClient"; +import { IoTTwinMakerPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: IoTTwinMakerClient, + input: GetPropertyValueHistoryCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new GetPropertyValueHistoryCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: IoTTwinMaker, + input: GetPropertyValueHistoryCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.getPropertyValueHistory(input, ...args); +}; +export async function* paginateGetPropertyValueHistory( + config: IoTTwinMakerPaginationConfiguration, + input: GetPropertyValueHistoryCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: GetPropertyValueHistoryCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof IoTTwinMaker) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof IoTTwinMakerClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected IoTTwinMaker | IoTTwinMakerClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-iottwinmaker/src/pagination/Interfaces.ts b/clients/client-iottwinmaker/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..eb86fe5046ee --- /dev/null +++ b/clients/client-iottwinmaker/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { IoTTwinMaker } from "../IoTTwinMaker"; +import { IoTTwinMakerClient } from "../IoTTwinMakerClient"; + +export interface IoTTwinMakerPaginationConfiguration extends PaginationConfiguration { + client: IoTTwinMaker | IoTTwinMakerClient; +} diff --git a/clients/client-iottwinmaker/src/pagination/ListComponentTypesPaginator.ts b/clients/client-iottwinmaker/src/pagination/ListComponentTypesPaginator.ts new file mode 100644 index 000000000000..0e3eefa16a8f --- /dev/null +++ b/clients/client-iottwinmaker/src/pagination/ListComponentTypesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListComponentTypesCommand, + ListComponentTypesCommandInput, + 
ListComponentTypesCommandOutput, +} from "../commands/ListComponentTypesCommand"; +import { IoTTwinMaker } from "../IoTTwinMaker"; +import { IoTTwinMakerClient } from "../IoTTwinMakerClient"; +import { IoTTwinMakerPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: IoTTwinMakerClient, + input: ListComponentTypesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListComponentTypesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: IoTTwinMaker, + input: ListComponentTypesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listComponentTypes(input, ...args); +}; +export async function* paginateListComponentTypes( + config: IoTTwinMakerPaginationConfiguration, + input: ListComponentTypesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListComponentTypesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof IoTTwinMaker) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof IoTTwinMakerClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected IoTTwinMaker | IoTTwinMakerClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-iottwinmaker/src/pagination/ListEntitiesPaginator.ts b/clients/client-iottwinmaker/src/pagination/ListEntitiesPaginator.ts new file mode 100644 index 000000000000..921dfe99f506 --- /dev/null +++ b/clients/client-iottwinmaker/src/pagination/ListEntitiesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListEntitiesCommand, + ListEntitiesCommandInput, + ListEntitiesCommandOutput, +} from "../commands/ListEntitiesCommand"; +import { IoTTwinMaker } from "../IoTTwinMaker"; +import { IoTTwinMakerClient } from "../IoTTwinMakerClient"; +import { IoTTwinMakerPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: IoTTwinMakerClient, + input: ListEntitiesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListEntitiesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: IoTTwinMaker, + input: ListEntitiesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listEntities(input, ...args); +}; +export async function* paginateListEntities( + config: IoTTwinMakerPaginationConfiguration, + input: ListEntitiesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListEntitiesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof IoTTwinMaker) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof IoTTwinMakerClient) { + page = await 
makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected IoTTwinMaker | IoTTwinMakerClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-iottwinmaker/src/pagination/ListScenesPaginator.ts b/clients/client-iottwinmaker/src/pagination/ListScenesPaginator.ts new file mode 100644 index 000000000000..8353bcb71d10 --- /dev/null +++ b/clients/client-iottwinmaker/src/pagination/ListScenesPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { ListScenesCommand, ListScenesCommandInput, ListScenesCommandOutput } from "../commands/ListScenesCommand"; +import { IoTTwinMaker } from "../IoTTwinMaker"; +import { IoTTwinMakerClient } from "../IoTTwinMakerClient"; +import { IoTTwinMakerPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: IoTTwinMakerClient, + input: ListScenesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListScenesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: IoTTwinMaker, + input: ListScenesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listScenes(input, ...args); +}; +export async function* paginateListScenes( + config: IoTTwinMakerPaginationConfiguration, + input: ListScenesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListScenesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof IoTTwinMaker) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof IoTTwinMakerClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected IoTTwinMaker | IoTTwinMakerClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-iottwinmaker/src/pagination/ListWorkspacesPaginator.ts b/clients/client-iottwinmaker/src/pagination/ListWorkspacesPaginator.ts new file mode 100644 index 000000000000..eb5860a49c1e --- /dev/null +++ b/clients/client-iottwinmaker/src/pagination/ListWorkspacesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListWorkspacesCommand, + ListWorkspacesCommandInput, + ListWorkspacesCommandOutput, +} from "../commands/ListWorkspacesCommand"; +import { IoTTwinMaker } from "../IoTTwinMaker"; +import { IoTTwinMakerClient } from "../IoTTwinMakerClient"; +import { IoTTwinMakerPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: IoTTwinMakerClient, + input: ListWorkspacesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListWorkspacesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: IoTTwinMaker, + input: ListWorkspacesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listWorkspaces(input, ...args); +}; +export async function* paginateListWorkspaces( + config: 
IoTTwinMakerPaginationConfiguration, + input: ListWorkspacesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListWorkspacesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof IoTTwinMaker) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof IoTTwinMakerClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected IoTTwinMaker | IoTTwinMakerClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-iottwinmaker/src/pagination/index.ts b/clients/client-iottwinmaker/src/pagination/index.ts new file mode 100644 index 000000000000..4b8b181348e0 --- /dev/null +++ b/clients/client-iottwinmaker/src/pagination/index.ts @@ -0,0 +1,6 @@ +export * from "./GetPropertyValueHistoryPaginator"; +export * from "./Interfaces"; +export * from "./ListComponentTypesPaginator"; +export * from "./ListEntitiesPaginator"; +export * from "./ListScenesPaginator"; +export * from "./ListWorkspacesPaginator"; diff --git a/clients/client-iottwinmaker/src/protocols/Aws_restJson1.ts b/clients/client-iottwinmaker/src/protocols/Aws_restJson1.ts new file mode 100644 index 000000000000..ffe21106ae46 --- /dev/null +++ b/clients/client-iottwinmaker/src/protocols/Aws_restJson1.ts @@ -0,0 +1,5066 @@ +import { + HttpRequest as __HttpRequest, + HttpResponse as __HttpResponse, + isValidHostname as __isValidHostname, +} from "@aws-sdk/protocol-http"; +import { + expectBoolean as __expectBoolean, + expectInt32 as __expectInt32, + expectLong as __expectLong, + expectNonNull as __expectNonNull, + expectNumber as __expectNumber, + expectObject as __expectObject, + expectString as __expectString, + extendedEncodeURIComponent as __extendedEncodeURIComponent, + limitedParseDouble as __limitedParseDouble, + parseEpochTimestamp as __parseEpochTimestamp, + serializeFloat as __serializeFloat, +} from "@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + MetadataBearer as __MetadataBearer, + ResponseMetadata as __ResponseMetadata, + SerdeContext as __SerdeContext, + SmithyException as __SmithyException, +} from "@aws-sdk/types"; + +import { + BatchPutPropertyValuesCommandInput, + BatchPutPropertyValuesCommandOutput, +} from "../commands/BatchPutPropertyValuesCommand"; +import { + CreateComponentTypeCommandInput, + CreateComponentTypeCommandOutput, +} from "../commands/CreateComponentTypeCommand"; +import { CreateEntityCommandInput, CreateEntityCommandOutput } from "../commands/CreateEntityCommand"; +import { CreateSceneCommandInput, CreateSceneCommandOutput } from "../commands/CreateSceneCommand"; +import { CreateWorkspaceCommandInput, CreateWorkspaceCommandOutput } from "../commands/CreateWorkspaceCommand"; +import { + DeleteComponentTypeCommandInput, + DeleteComponentTypeCommandOutput, +} from "../commands/DeleteComponentTypeCommand"; +import { DeleteEntityCommandInput, DeleteEntityCommandOutput } from "../commands/DeleteEntityCommand"; +import { DeleteSceneCommandInput, DeleteSceneCommandOutput } from "../commands/DeleteSceneCommand"; +import { DeleteWorkspaceCommandInput, DeleteWorkspaceCommandOutput } from 
"../commands/DeleteWorkspaceCommand"; +import { GetComponentTypeCommandInput, GetComponentTypeCommandOutput } from "../commands/GetComponentTypeCommand"; +import { GetEntityCommandInput, GetEntityCommandOutput } from "../commands/GetEntityCommand"; +import { GetPropertyValueCommandInput, GetPropertyValueCommandOutput } from "../commands/GetPropertyValueCommand"; +import { + GetPropertyValueHistoryCommandInput, + GetPropertyValueHistoryCommandOutput, +} from "../commands/GetPropertyValueHistoryCommand"; +import { GetSceneCommandInput, GetSceneCommandOutput } from "../commands/GetSceneCommand"; +import { GetWorkspaceCommandInput, GetWorkspaceCommandOutput } from "../commands/GetWorkspaceCommand"; +import { ListComponentTypesCommandInput, ListComponentTypesCommandOutput } from "../commands/ListComponentTypesCommand"; +import { ListEntitiesCommandInput, ListEntitiesCommandOutput } from "../commands/ListEntitiesCommand"; +import { ListScenesCommandInput, ListScenesCommandOutput } from "../commands/ListScenesCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { ListWorkspacesCommandInput, ListWorkspacesCommandOutput } from "../commands/ListWorkspacesCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { + UpdateComponentTypeCommandInput, + UpdateComponentTypeCommandOutput, +} from "../commands/UpdateComponentTypeCommand"; +import { UpdateEntityCommandInput, UpdateEntityCommandOutput } from "../commands/UpdateEntityCommand"; +import { UpdateSceneCommandInput, UpdateSceneCommandOutput } from "../commands/UpdateSceneCommand"; +import { UpdateWorkspaceCommandInput, UpdateWorkspaceCommandOutput } from "../commands/UpdateWorkspaceCommand"; +import { + AccessDeniedException, + BatchPutPropertyError, + BatchPutPropertyErrorEntry, + ComponentRequest, + ComponentResponse, + ComponentTypeSummary, + ComponentUpdateRequest, + ConflictException, + ConnectorFailureException, + ConnectorTimeoutException, + DataConnector, + DataType, + DataValue, + EntityPropertyReference, + EntitySummary, + ErrorDetails, + FunctionRequest, + FunctionResponse, + InternalServerException, + InterpolationParameters, + LambdaFunction, + ListComponentTypesFilter, + ListEntitiesFilter, + ParentEntityUpdateRequest, + PropertyDefinitionRequest, + PropertyDefinitionResponse, + PropertyFilter, + PropertyLatestValue, + PropertyRequest, + PropertyResponse, + PropertyValue, + PropertyValueEntry, + PropertyValueHistory, + Relationship, + RelationshipValue, + ResourceNotFoundException, + SceneSummary, + ServiceQuotaExceededException, + Status, + ThrottlingException, + TooManyTagsException, + ValidationException, + WorkspaceSummary, +} from "../models/models_0"; + +export const serializeAws_restJson1BatchPutPropertyValuesCommand = async ( + input: BatchPutPropertyValuesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/entity-properties"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.entries !== undefined && + input.entries !== null && { entries: serializeAws_restJson1Entries(input.entries, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "data." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateComponentTypeCommand = async ( + input: CreateComponentTypeCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/component-types/{componentTypeId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.componentTypeId !== undefined) { + const labelValue: string = input.componentTypeId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: componentTypeId."); + } + resolvedPath = resolvedPath.replace("{componentTypeId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: componentTypeId."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.extendsFrom !== undefined && + input.extendsFrom !== null && { extendsFrom: serializeAws_restJson1ExtendsFrom(input.extendsFrom, context) }), + ...(input.functions !== undefined && + input.functions !== null && { functions: serializeAws_restJson1FunctionsRequest(input.functions, context) }), + ...(input.isSingleton !== undefined && input.isSingleton !== null && { isSingleton: input.isSingleton }), + ...(input.propertyDefinitions !== undefined && + input.propertyDefinitions !== null && { + propertyDefinitions: serializeAws_restJson1PropertyDefinitionsRequest(input.propertyDefinitions, context), + }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." 
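// Editor's note (annotation, not part of the generated patch): a hedged usage sketch for the
// BatchPutPropertyValues serializer above, which posts to the "data."-prefixed endpoint at
// /workspaces/{workspaceId}/entity-properties. Workspace, entity, component, and property names
// are illustrative, and the nested entry shape is assumed from the IoT TwinMaker data model
// rather than from this hunk.
import { BatchPutPropertyValuesCommand, IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const dataPlaneClient = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region

async function putExampleReading(): Promise<void> {
  const result = await dataPlaneClient.send(
    new BatchPutPropertyValuesCommand({
      workspaceId: "example-workspace",
      entries: [
        {
          entityPropertyReference: {
            entityId: "example-entity",
            componentName: "exampleComponent",
            propertyName: "temperature",
          },
          propertyValues: [{ value: { doubleValue: 22.5 }, timestamp: new Date() }],
        },
      ],
    })
  );
  // errorEntries (deserialized later in this file) reports per-entry failures, if any.
  console.log(result.errorEntries ?? []);
}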
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateEntityCommand = async ( + input: CreateEntityCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/workspaces/{workspaceId}/entities"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.components !== undefined && + input.components !== null && { + components: serializeAws_restJson1ComponentsMapRequest(input.components, context), + }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.entityId !== undefined && input.entityId !== null && { entityId: input.entityId }), + ...(input.entityName !== undefined && input.entityName !== null && { entityName: input.entityName }), + ...(input.parentEntityId !== undefined && + input.parentEntityId !== null && { parentEntityId: input.parentEntityId }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateSceneCommand = async ( + input: CreateSceneCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
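// Editor's note (annotation, not part of the generated patch): a minimal sketch of the
// CreateEntity call whose serializer appears above. Only top-level request fields present in
// that serializer (workspaceId, entityName, description, tags) are used; the values are
// illustrative.
import { CreateEntityCommand, IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const controlPlaneClient = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region

async function createExampleEntity(): Promise<string | undefined> {
  const created = await controlPlaneClient.send(
    new CreateEntityCommand({
      workspaceId: "example-workspace",
      entityName: "Pump-01",
      description: "Entity created from the SDK usage sketch",
      tags: { env: "dev" },
    })
  );
  // The response carries entityId, arn, creationDateTime, and state.
  return created.entityId;
}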
basePath.slice(0, -1) : basePath || ""}` + "/workspaces/{workspaceId}/scenes"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.capabilities !== undefined && + input.capabilities !== null && { + capabilities: serializeAws_restJson1SceneCapabilities(input.capabilities, context), + }), + ...(input.contentLocation !== undefined && + input.contentLocation !== null && { contentLocation: input.contentLocation }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.sceneId !== undefined && input.sceneId !== null && { sceneId: input.sceneId }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateWorkspaceCommand = async ( + input: CreateWorkspaceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/workspaces/{workspaceId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.role !== undefined && input.role !== null && { role: input.role }), + ...(input.s3Location !== undefined && input.s3Location !== null && { s3Location: input.s3Location }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." 
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteComponentTypeCommand = async ( + input: DeleteComponentTypeCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/component-types/{componentTypeId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.componentTypeId !== undefined) { + const labelValue: string = input.componentTypeId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: componentTypeId."); + } + resolvedPath = resolvedPath.replace("{componentTypeId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: componentTypeId."); + } + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteEntityCommand = async ( + input: DeleteEntityCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/entities/{entityId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.entityId !== undefined) { + const labelValue: string = input.entityId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: entityId."); + } + resolvedPath = resolvedPath.replace("{entityId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: entityId."); + } + const query: any = { + ...(input.isRecursive !== undefined && { isRecursive: input.isRecursive.toString() }), + }; + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." 
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1DeleteSceneCommand = async ( + input: DeleteSceneCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/scenes/{sceneId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.sceneId !== undefined) { + const labelValue: string = input.sceneId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: sceneId."); + } + resolvedPath = resolvedPath.replace("{sceneId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: sceneId."); + } + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteWorkspaceCommand = async ( + input: DeleteWorkspaceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/workspaces/{workspaceId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." 
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetComponentTypeCommand = async ( + input: GetComponentTypeCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/component-types/{componentTypeId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.componentTypeId !== undefined) { + const labelValue: string = input.componentTypeId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: componentTypeId."); + } + resolvedPath = resolvedPath.replace("{componentTypeId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: componentTypeId."); + } + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetEntityCommand = async ( + input: GetEntityCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/entities/{entityId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.entityId !== undefined) { + const labelValue: string = input.entityId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: entityId."); + } + resolvedPath = resolvedPath.replace("{entityId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: entityId."); + } + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." 
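// Editor's note (annotation, not part of the generated patch): every serializer in this file
// prepends a host prefix to the resolved endpoint -- "data." for the property-value data plane
// and "api." for control-plane operations -- unless host prefixing is disabled on the client.
// A small sketch of opting out when targeting a custom, non-prefixable endpoint; the endpoint
// URL and region are placeholders.
import { IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const customEndpointClient = new IoTTwinMakerClient({
  region: "us-east-1",               // placeholder region
  endpoint: "http://localhost:4566", // placeholder custom endpoint
  disableHostPrefix: true,           // skips the "api."/"data." prefixing performed above
});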
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetPropertyValueCommand = async ( + input: GetPropertyValueCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/entity-properties/value"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.componentName !== undefined && input.componentName !== null && { componentName: input.componentName }), + ...(input.componentTypeId !== undefined && + input.componentTypeId !== null && { componentTypeId: input.componentTypeId }), + ...(input.entityId !== undefined && input.entityId !== null && { entityId: input.entityId }), + ...(input.selectedProperties !== undefined && + input.selectedProperties !== null && { + selectedProperties: serializeAws_restJson1SelectedPropertyList(input.selectedProperties, context), + }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "data." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetPropertyValueHistoryCommand = async ( + input: GetPropertyValueHistoryCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/entity-properties/history"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.componentName !== undefined && input.componentName !== null && { componentName: input.componentName }), + ...(input.componentTypeId !== undefined && + input.componentTypeId !== null && { componentTypeId: input.componentTypeId }), + ...(input.endDateTime !== undefined && + input.endDateTime !== null && { endDateTime: Math.round(input.endDateTime.getTime() / 1000) }), + ...(input.entityId !== undefined && input.entityId !== null && { entityId: input.entityId }), + ...(input.interpolation !== undefined && + input.interpolation !== null && { + interpolation: serializeAws_restJson1InterpolationParameters(input.interpolation, context), + }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + ...(input.orderByTime !== undefined && input.orderByTime !== null && { orderByTime: input.orderByTime }), + ...(input.propertyFilters !== undefined && + input.propertyFilters !== null && { + propertyFilters: serializeAws_restJson1PropertyFilters(input.propertyFilters, context), + }), + ...(input.selectedProperties !== undefined && + input.selectedProperties !== null && { + selectedProperties: serializeAws_restJson1SelectedPropertyList(input.selectedProperties, context), + }), + ...(input.startDateTime !== undefined && + input.startDateTime !== null && { startDateTime: Math.round(input.startDateTime.getTime() / 1000) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "data." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetSceneCommand = async ( + input: GetSceneCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
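// Editor's note (annotation, not part of the generated patch): GetPropertyValueHistory accepts
// JS Date objects; the serializer above converts them to epoch seconds via
// Math.round(date.getTime() / 1000). A hedged sketch querying the last hour of one property;
// identifiers are illustrative.
import { GetPropertyValueHistoryCommand, IoTTwinMakerClient } from "@aws-sdk/client-iottwinmaker";

const historyClient = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region

async function readLastHour(): Promise<void> {
  const endDateTime = new Date();
  const startDateTime = new Date(endDateTime.getTime() - 60 * 60 * 1000);
  const history = await historyClient.send(
    new GetPropertyValueHistoryCommand({
      workspaceId: "example-workspace",
      entityId: "example-entity",
      componentName: "exampleComponent",
      selectedProperties: ["temperature"],
      startDateTime, // serialized as epoch seconds, as shown above
      endDateTime,
      orderByTime: "DESCENDING",
    })
  );
  console.log(history.propertyValues?.length ?? 0, "property value series returned");
}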
basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/scenes/{sceneId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.sceneId !== undefined) { + const labelValue: string = input.sceneId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: sceneId."); + } + resolvedPath = resolvedPath.replace("{sceneId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: sceneId."); + } + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetWorkspaceCommand = async ( + input: GetWorkspaceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/workspaces/{workspaceId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListComponentTypesCommand = async ( + input: ListComponentTypesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/component-types-list"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.filters !== undefined && + input.filters !== null && { filters: serializeAws_restJson1ListComponentTypesFilters(input.filters, context) }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListEntitiesCommand = async ( + input: ListEntitiesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/workspaces/{workspaceId}/entities-list"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.filters !== undefined && + input.filters !== null && { filters: serializeAws_restJson1ListEntitiesFilters(input.filters, context) }), + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListScenesCommand = async ( + input: ListScenesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/workspaces/{workspaceId}/scenes-list"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListTagsForResourceCommand = async ( + input: ListTagsForResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags-list"; + let body: any; + body = JSON.stringify({ + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + ...(input.resourceARN !== undefined && input.resourceARN !== null && { resourceARN: input.resourceARN }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListWorkspacesCommand = async ( + input: ListWorkspacesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/workspaces-list"; + let body: any; + body = JSON.stringify({ + ...(input.maxResults !== undefined && input.maxResults !== null && { maxResults: input.maxResults }), + ...(input.nextToken !== undefined && input.nextToken !== null && { nextToken: input.nextToken }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." 
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1TagResourceCommand = async ( + input: TagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags"; + let body: any; + body = JSON.stringify({ + ...(input.resourceARN !== undefined && input.resourceARN !== null && { resourceARN: input.resourceARN }), + ...(input.tags !== undefined && input.tags !== null && { tags: serializeAws_restJson1TagMap(input.tags, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UntagResourceCommand = async ( + input: UntagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags"; + const query: any = { + ...(input.resourceARN !== undefined && { resourceARN: input.resourceARN }), + ...(input.tagKeys !== undefined && { tagKeys: (input.tagKeys || []).map((_entry) => _entry as any) }), + }; + let body: any; + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1UpdateComponentTypeCommand = async ( + input: UpdateComponentTypeCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/component-types/{componentTypeId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.componentTypeId !== undefined) { + const labelValue: string = input.componentTypeId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: componentTypeId."); + } + resolvedPath = resolvedPath.replace("{componentTypeId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: componentTypeId."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.extendsFrom !== undefined && + input.extendsFrom !== null && { extendsFrom: serializeAws_restJson1ExtendsFrom(input.extendsFrom, context) }), + ...(input.functions !== undefined && + input.functions !== null && { functions: serializeAws_restJson1FunctionsRequest(input.functions, context) }), + ...(input.isSingleton !== undefined && input.isSingleton !== null && { isSingleton: input.isSingleton }), + ...(input.propertyDefinitions !== undefined && + input.propertyDefinitions !== null && { + propertyDefinitions: serializeAws_restJson1PropertyDefinitionsRequest(input.propertyDefinitions, context), + }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateEntityCommand = async ( + input: UpdateEntityCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/entities/{entityId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.entityId !== undefined) { + const labelValue: string = input.entityId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: entityId."); + } + resolvedPath = resolvedPath.replace("{entityId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: entityId."); + } + let body: any; + body = JSON.stringify({ + ...(input.componentUpdates !== undefined && + input.componentUpdates !== null && { + componentUpdates: serializeAws_restJson1ComponentUpdatesMapRequest(input.componentUpdates, context), + }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.entityName !== undefined && input.entityName !== null && { entityName: input.entityName }), + ...(input.parentEntityUpdate !== undefined && + input.parentEntityUpdate !== null && { + parentEntityUpdate: serializeAws_restJson1ParentEntityUpdateRequest(input.parentEntityUpdate, context), + }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateSceneCommand = async ( + input: UpdateSceneCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + + "/workspaces/{workspaceId}/scenes/{sceneId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + if (input.sceneId !== undefined) { + const labelValue: string = input.sceneId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: sceneId."); + } + resolvedPath = resolvedPath.replace("{sceneId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: sceneId."); + } + let body: any; + body = JSON.stringify({ + ...(input.capabilities !== undefined && + input.capabilities !== null && { + capabilities: serializeAws_restJson1SceneCapabilities(input.capabilities, context), + }), + ...(input.contentLocation !== undefined && + input.contentLocation !== null && { contentLocation: input.contentLocation }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateWorkspaceCommand = async ( + input: UpdateWorkspaceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/workspaces/{workspaceId}"; + if (input.workspaceId !== undefined) { + const labelValue: string = input.workspaceId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: workspaceId."); + } + resolvedPath = resolvedPath.replace("{workspaceId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: workspaceId."); + } + let body: any; + body = JSON.stringify({ + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.role !== undefined && input.role !== null && { role: input.role }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "api." 
+ resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + +export const deserializeAws_restJson1BatchPutPropertyValuesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1BatchPutPropertyValuesCommandError(output, context); + } + const contents: BatchPutPropertyValuesCommandOutput = { + $metadata: deserializeMetadata(output), + errorEntries: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.errorEntries !== undefined && data.errorEntries !== null) { + contents.errorEntries = deserializeAws_restJson1ErrorEntries(data.errorEntries, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1BatchPutPropertyValuesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateComponentTypeCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateComponentTypeCommandError(output, context); + } + const contents: CreateComponentTypeCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + creationDateTime: 
undefined, + state: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.creationDateTime !== undefined && data.creationDateTime !== null) { + contents.creationDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.creationDateTime))); + } + if (data.state !== undefined && data.state !== null) { + contents.state = __expectString(data.state); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateComponentTypeCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.iottwinmaker#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateEntityCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateEntityCommandError(output, context); + } + const contents: CreateEntityCommandOutput = { + $metadata: 
deserializeMetadata(output), + arn: undefined, + creationDateTime: undefined, + entityId: undefined, + state: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.creationDateTime !== undefined && data.creationDateTime !== null) { + contents.creationDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.creationDateTime))); + } + if (data.entityId !== undefined && data.entityId !== null) { + contents.entityId = __expectString(data.entityId); + } + if (data.state !== undefined && data.state !== null) { + contents.state = __expectString(data.state); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateEntityCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.iottwinmaker#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateSceneCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 
200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateSceneCommandError(output, context); + } + const contents: CreateSceneCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + creationDateTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.creationDateTime !== undefined && data.creationDateTime !== null) { + contents.creationDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.creationDateTime))); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateSceneCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.iottwinmaker#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateWorkspaceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return 
deserializeAws_restJson1CreateWorkspaceCommandError(output, context); + } + const contents: CreateWorkspaceCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + creationDateTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.creationDateTime !== undefined && data.creationDateTime !== null) { + contents.creationDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.creationDateTime))); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateWorkspaceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.iottwinmaker#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteComponentTypeCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteComponentTypeCommandError(output, context); + } 
+  const contents: DeleteComponentTypeCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    state: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.state !== undefined && data.state !== null) {
+    contents.state = __expectString(data.state);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1DeleteComponentTypeCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<DeleteComponentTypeCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.iottwinmaker#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.iottwinmaker#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ResourceNotFoundException":
+    case "com.amazonaws.iottwinmaker#ResourceNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.iottwinmaker#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.iottwinmaker#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1DeleteEntityCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<DeleteEntityCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1DeleteEntityCommandError(output, context);
+  }
+  const contents: DeleteEntityCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    state: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.state !== undefined && data.state !== null) {
+    contents.state = __expectString(data.state);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1DeleteEntityCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<DeleteEntityCommandOutput> => {
+  const parsedOutput: any = {
+    ...output, +
body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteSceneCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteSceneCommandError(output, context); + } + const contents: DeleteSceneCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteSceneCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteWorkspaceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteWorkspaceCommandError(output, context); + } + const contents: DeleteWorkspaceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteWorkspaceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await 
deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetComponentTypeCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetComponentTypeCommandError(output, context); + } + const contents: GetComponentTypeCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + componentTypeId: undefined, + creationDateTime: undefined, + description: undefined, + extendsFrom: undefined, + functions: undefined, + isAbstract: undefined, + isSchemaInitialized: undefined, + isSingleton: undefined, + propertyDefinitions: undefined, + status: undefined, + updateDateTime: undefined, + workspaceId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.componentTypeId !== undefined && data.componentTypeId !== null) { + contents.componentTypeId = __expectString(data.componentTypeId); + } + if (data.creationDateTime !== undefined && data.creationDateTime !== null) { + contents.creationDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.creationDateTime))); + } + if (data.description !== undefined && data.description !== null) { + contents.description = __expectString(data.description); + } + if (data.extendsFrom !== undefined && data.extendsFrom !== null) { + contents.extendsFrom = deserializeAws_restJson1ExtendsFrom(data.extendsFrom, context); + } + if (data.functions !== undefined && data.functions !== null) { + contents.functions = deserializeAws_restJson1FunctionsResponse(data.functions, context); + } + if (data.isAbstract !== undefined && data.isAbstract !== null) { + contents.isAbstract = __expectBoolean(data.isAbstract); + } + if (data.isSchemaInitialized !== undefined && data.isSchemaInitialized !== null) { + contents.isSchemaInitialized = __expectBoolean(data.isSchemaInitialized); + } + if (data.isSingleton !== undefined && data.isSingleton !== null) { + contents.isSingleton = __expectBoolean(data.isSingleton); + } + if (data.propertyDefinitions !== undefined && data.propertyDefinitions !== null) { + contents.propertyDefinitions = deserializeAws_restJson1PropertyDefinitionsResponse( + data.propertyDefinitions, + context + ); + } + if (data.status !== undefined && data.status !== null) { + contents.status = deserializeAws_restJson1Status(data.status, context); + } + if (data.updateDateTime !== undefined && data.updateDateTime !== null) { + contents.updateDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.updateDateTime))); + } + if (data.workspaceId !== undefined && data.workspaceId !== null) { + contents.workspaceId = __expectString(data.workspaceId); + } + return Promise.resolve(contents); 
+}; + +const deserializeAws_restJson1GetComponentTypeCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetEntityCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetEntityCommandError(output, context); + } + const contents: GetEntityCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + components: undefined, + creationDateTime: undefined, + description: undefined, + entityId: undefined, + entityName: undefined, + hasChildEntities: undefined, + parentEntityId: undefined, + status: undefined, + updateDateTime: undefined, + workspaceId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.components !== undefined && data.components !== null) { + contents.components = deserializeAws_restJson1ComponentsMap(data.components, context); + } + if (data.creationDateTime !== undefined && data.creationDateTime !== null) { + contents.creationDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.creationDateTime))); + } + if (data.description !== undefined && data.description !== null) { + contents.description = __expectString(data.description); + } + if (data.entityId !== undefined && data.entityId !== null) { + contents.entityId = __expectString(data.entityId); + } + if (data.entityName !== 
undefined && data.entityName !== null) { + contents.entityName = __expectString(data.entityName); + } + if (data.hasChildEntities !== undefined && data.hasChildEntities !== null) { + contents.hasChildEntities = __expectBoolean(data.hasChildEntities); + } + if (data.parentEntityId !== undefined && data.parentEntityId !== null) { + contents.parentEntityId = __expectString(data.parentEntityId); + } + if (data.status !== undefined && data.status !== null) { + contents.status = deserializeAws_restJson1Status(data.status, context); + } + if (data.updateDateTime !== undefined && data.updateDateTime !== null) { + contents.updateDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.updateDateTime))); + } + if (data.workspaceId !== undefined && data.workspaceId !== null) { + contents.workspaceId = __expectString(data.workspaceId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetEntityCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetPropertyValueCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetPropertyValueCommandError(output, context); + } + const contents: 
GetPropertyValueCommandOutput = { + $metadata: deserializeMetadata(output), + propertyValues: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.propertyValues !== undefined && data.propertyValues !== null) { + contents.propertyValues = deserializeAws_restJson1PropertyLatestValueMap(data.propertyValues, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetPropertyValueCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConnectorFailureException": + case "com.amazonaws.iottwinmaker#ConnectorFailureException": + response = { + ...(await deserializeAws_restJson1ConnectorFailureExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConnectorTimeoutException": + case "com.amazonaws.iottwinmaker#ConnectorTimeoutException": + response = { + ...(await deserializeAws_restJson1ConnectorTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetPropertyValueHistoryCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + 
return deserializeAws_restJson1GetPropertyValueHistoryCommandError(output, context); + } + const contents: GetPropertyValueHistoryCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + propertyValues: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.propertyValues !== undefined && data.propertyValues !== null) { + contents.propertyValues = deserializeAws_restJson1PropertyValueList(data.propertyValues, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetPropertyValueHistoryCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConnectorFailureException": + case "com.amazonaws.iottwinmaker#ConnectorFailureException": + response = { + ...(await deserializeAws_restJson1ConnectorFailureExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConnectorTimeoutException": + case "com.amazonaws.iottwinmaker#ConnectorTimeoutException": + response = { + ...(await deserializeAws_restJson1ConnectorTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return 
Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetSceneCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetSceneCommandError(output, context); + } + const contents: GetSceneCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + capabilities: undefined, + contentLocation: undefined, + creationDateTime: undefined, + description: undefined, + sceneId: undefined, + updateDateTime: undefined, + workspaceId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.capabilities !== undefined && data.capabilities !== null) { + contents.capabilities = deserializeAws_restJson1SceneCapabilities(data.capabilities, context); + } + if (data.contentLocation !== undefined && data.contentLocation !== null) { + contents.contentLocation = __expectString(data.contentLocation); + } + if (data.creationDateTime !== undefined && data.creationDateTime !== null) { + contents.creationDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.creationDateTime))); + } + if (data.description !== undefined && data.description !== null) { + contents.description = __expectString(data.description); + } + if (data.sceneId !== undefined && data.sceneId !== null) { + contents.sceneId = __expectString(data.sceneId); + } + if (data.updateDateTime !== undefined && data.updateDateTime !== null) { + contents.updateDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.updateDateTime))); + } + if (data.workspaceId !== undefined && data.workspaceId !== null) { + contents.workspaceId = __expectString(data.workspaceId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetSceneCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case 
"com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetWorkspaceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetWorkspaceCommandError(output, context); + } + const contents: GetWorkspaceCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + creationDateTime: undefined, + description: undefined, + role: undefined, + s3Location: undefined, + updateDateTime: undefined, + workspaceId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.creationDateTime !== undefined && data.creationDateTime !== null) { + contents.creationDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.creationDateTime))); + } + if (data.description !== undefined && data.description !== null) { + contents.description = __expectString(data.description); + } + if (data.role !== undefined && data.role !== null) { + contents.role = __expectString(data.role); + } + if (data.s3Location !== undefined && data.s3Location !== null) { + contents.s3Location = __expectString(data.s3Location); + } + if (data.updateDateTime !== undefined && data.updateDateTime !== null) { + contents.updateDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.updateDateTime))); + } + if (data.workspaceId !== undefined && data.workspaceId !== null) { + contents.workspaceId = __expectString(data.workspaceId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetWorkspaceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await 
deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListComponentTypesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListComponentTypesCommandError(output, context); + } + const contents: ListComponentTypesCommandOutput = { + $metadata: deserializeMetadata(output), + componentTypeSummaries: undefined, + maxResults: undefined, + nextToken: undefined, + workspaceId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.componentTypeSummaries !== undefined && data.componentTypeSummaries !== null) { + contents.componentTypeSummaries = deserializeAws_restJson1ComponentTypeSummaries( + data.componentTypeSummaries, + context + ); + } + if (data.maxResults !== undefined && data.maxResults !== null) { + contents.maxResults = __expectInt32(data.maxResults); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.workspaceId !== undefined && data.workspaceId !== null) { + contents.workspaceId = __expectString(data.workspaceId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListComponentTypesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case 
"com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListEntitiesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListEntitiesCommandError(output, context); + } + const contents: ListEntitiesCommandOutput = { + $metadata: deserializeMetadata(output), + entitySummaries: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.entitySummaries !== undefined && data.entitySummaries !== null) { + contents.entitySummaries = deserializeAws_restJson1EntitySummaries(data.entitySummaries, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListEntitiesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || 
errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListScenesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListScenesCommandError(output, context); + } + const contents: ListScenesCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + sceneSummaries: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.sceneSummaries !== undefined && data.sceneSummaries !== null) { + contents.sceneSummaries = deserializeAws_restJson1SceneSummaries(data.sceneSummaries, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListScenesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTagsForResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return 
deserializeAws_restJson1ListTagsForResourceCommandError(output, context); + } + const contents: ListTagsForResourceCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + tags: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.tags !== undefined && data.tags !== null) { + contents.tags = deserializeAws_restJson1TagMap(data.tags, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTagsForResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListWorkspacesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListWorkspacesCommandError(output, context); + } + const contents: ListWorkspacesCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + workspaceSummaries: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.workspaceSummaries !== undefined && data.workspaceSummaries !== null) { + contents.workspaceSummaries = deserializeAws_restJson1WorkspaceSummaries(data.workspaceSummaries, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListWorkspacesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case 
"com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1TagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1TagResourceCommandError(output, context); + } + const contents: TagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1TagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyTagsException": + case "com.amazonaws.iottwinmaker#TooManyTagsException": + response = { + ...(await deserializeAws_restJson1TooManyTagsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + 
$metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UntagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UntagResourceCommandError(output, context); + } + const contents: UntagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UntagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateComponentTypeCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateComponentTypeCommandError(output, context); + } + const contents: UpdateComponentTypeCommandOutput = { + $metadata: deserializeMetadata(output), + arn: undefined, + componentTypeId: undefined, + state: undefined, + workspaceId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.arn !== undefined && data.arn !== null) { + contents.arn = __expectString(data.arn); + } + if (data.componentTypeId !== undefined && data.componentTypeId !== null) { + contents.componentTypeId = __expectString(data.componentTypeId); + } + if (data.state !== undefined && data.state !== null) { + contents.state = __expectString(data.state); + } + if (data.workspaceId !== undefined && data.workspaceId !== null) { + contents.workspaceId = __expectString(data.workspaceId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateComponentTypeCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + 
body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateEntityCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateEntityCommandError(output, context); + } + const contents: UpdateEntityCommandOutput = { + $metadata: deserializeMetadata(output), + state: undefined, + updateDateTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.state !== undefined && data.state !== null) { + contents.state = __expectString(data.state); + } + if (data.updateDateTime !== undefined && data.updateDateTime !== null) { + contents.updateDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.updateDateTime))); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateEntityCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: 
__SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.iottwinmaker#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateSceneCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateSceneCommandError(output, context); + } + const contents: UpdateSceneCommandOutput = { + $metadata: deserializeMetadata(output), + updateDateTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.updateDateTime !== undefined && data.updateDateTime !== null) { + contents.updateDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.updateDateTime))); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateSceneCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: 
await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateWorkspaceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateWorkspaceCommandError(output, context); + } + const contents: UpdateWorkspaceCommandOutput = { + $metadata: deserializeMetadata(output), + updateDateTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.updateDateTime !== undefined && data.updateDateTime !== null) { + contents.updateDateTime = __expectNonNull(__parseEpochTimestamp(__expectNumber(data.updateDateTime))); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateWorkspaceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.iottwinmaker#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.iottwinmaker#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.iottwinmaker#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.iottwinmaker#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.iottwinmaker#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: AccessDeniedException = { + name: "AccessDeniedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ConflictExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ConflictException = { + name: "ConflictException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ConnectorFailureExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ConnectorFailureException = { + name: "ConnectorFailureException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ConnectorTimeoutExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + 
const contents: ConnectorTimeoutException = { + name: "ConnectorTimeoutException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: InternalServerException = { + name: "InternalServerException", + $fault: "server", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ResourceNotFoundException = { + name: "ResourceNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ServiceQuotaExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ServiceQuotaExceededException = { + name: "ServiceQuotaExceededException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ThrottlingExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ThrottlingException = { + name: "ThrottlingException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1TooManyTagsExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: TooManyTagsException = { + name: "TooManyTagsException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ValidationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ValidationException = { + name: "ValidationException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const serializeAws_restJson1ComponentRequest = (input: ComponentRequest, context: __SerdeContext): any => { + return { + ...(input.componentTypeId !== undefined && + input.componentTypeId !== null && { componentTypeId: input.componentTypeId }), + 
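serializeAws_restJson1ComponentRequest and the other request serializers in this hunk follow one idiom: a key is spread into the JSON body only when the input member is neither undefined nor null. An illustrative, stand-alone reduction of that pattern (hypothetical shape, not part of the generated client):

interface ExampleInput {
  description?: string;
  updateType?: string | null;
}

// Only defined, non-null members end up as keys in the serialized body.
const serializeExampleInput = (input: ExampleInput): Record<string, unknown> => ({
  ...(input.description !== undefined && input.description !== null && { description: input.description }),
  ...(input.updateType !== undefined && input.updateType !== null && { updateType: input.updateType }),
});

// serializeExampleInput({ description: "d", updateType: null }) -> { description: "d" }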
...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.properties !== undefined && + input.properties !== null && { properties: serializeAws_restJson1PropertyRequests(input.properties, context) }), + }; +}; + +const serializeAws_restJson1ComponentsMapRequest = ( + input: { [key: string]: ComponentRequest }, + context: __SerdeContext +): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_restJson1ComponentRequest(value, context), + }; + }, {}); +}; + +const serializeAws_restJson1ComponentUpdateRequest = (input: ComponentUpdateRequest, context: __SerdeContext): any => { + return { + ...(input.componentTypeId !== undefined && + input.componentTypeId !== null && { componentTypeId: input.componentTypeId }), + ...(input.description !== undefined && input.description !== null && { description: input.description }), + ...(input.propertyUpdates !== undefined && + input.propertyUpdates !== null && { + propertyUpdates: serializeAws_restJson1PropertyRequests(input.propertyUpdates, context), + }), + ...(input.updateType !== undefined && input.updateType !== null && { updateType: input.updateType }), + }; +}; + +const serializeAws_restJson1ComponentUpdatesMapRequest = ( + input: { [key: string]: ComponentUpdateRequest }, + context: __SerdeContext +): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_restJson1ComponentUpdateRequest(value, context), + }; + }, {}); +}; + +const serializeAws_restJson1Configuration = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1DataConnector = (input: DataConnector, context: __SerdeContext): any => { + return { + ...(input.isNative !== undefined && input.isNative !== null && { isNative: input.isNative }), + ...(input.lambda !== undefined && + input.lambda !== null && { lambda: serializeAws_restJson1LambdaFunction(input.lambda, context) }), + }; +}; + +const serializeAws_restJson1DataType = (input: DataType, context: __SerdeContext): any => { + return { + ...(input.allowedValues !== undefined && + input.allowedValues !== null && { + allowedValues: serializeAws_restJson1DataValueList(input.allowedValues, context), + }), + ...(input.nestedType !== undefined && + input.nestedType !== null && { nestedType: serializeAws_restJson1DataType(input.nestedType, context) }), + ...(input.relationship !== undefined && + input.relationship !== null && { relationship: serializeAws_restJson1Relationship(input.relationship, context) }), + ...(input.type !== undefined && input.type !== null && { type: input.type }), + ...(input.unitOfMeasure !== undefined && input.unitOfMeasure !== null && { unitOfMeasure: input.unitOfMeasure }), + }; +}; + +const serializeAws_restJson1DataValue = (input: DataValue, context: __SerdeContext): any => { + return { + ...(input.booleanValue !== undefined && input.booleanValue !== null && { booleanValue: input.booleanValue }), + ...(input.doubleValue !== undefined && + input.doubleValue !== null && { doubleValue: __serializeFloat(input.doubleValue) }), + 
...(input.expression !== undefined && input.expression !== null && { expression: input.expression }), + ...(input.integerValue !== undefined && input.integerValue !== null && { integerValue: input.integerValue }), + ...(input.listValue !== undefined && + input.listValue !== null && { listValue: serializeAws_restJson1DataValueList(input.listValue, context) }), + ...(input.longValue !== undefined && input.longValue !== null && { longValue: input.longValue }), + ...(input.mapValue !== undefined && + input.mapValue !== null && { mapValue: serializeAws_restJson1DataValueMap(input.mapValue, context) }), + ...(input.relationshipValue !== undefined && + input.relationshipValue !== null && { + relationshipValue: serializeAws_restJson1RelationshipValue(input.relationshipValue, context), + }), + ...(input.stringValue !== undefined && input.stringValue !== null && { stringValue: input.stringValue }), + }; +}; + +const serializeAws_restJson1DataValueList = (input: DataValue[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1DataValue(entry, context); + }); +}; + +const serializeAws_restJson1DataValueMap = (input: { [key: string]: DataValue }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_restJson1DataValue(value, context), + }; + }, {}); +}; + +const serializeAws_restJson1EntityPropertyReference = ( + input: EntityPropertyReference, + context: __SerdeContext +): any => { + return { + ...(input.componentName !== undefined && input.componentName !== null && { componentName: input.componentName }), + ...(input.entityId !== undefined && input.entityId !== null && { entityId: input.entityId }), + ...(input.externalIdProperty !== undefined && + input.externalIdProperty !== null && { + externalIdProperty: serializeAws_restJson1ExternalIdProperty(input.externalIdProperty, context), + }), + ...(input.propertyName !== undefined && input.propertyName !== null && { propertyName: input.propertyName }), + }; +}; + +const serializeAws_restJson1Entries = (input: PropertyValueEntry[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1PropertyValueEntry(entry, context); + }); +}; + +const serializeAws_restJson1ExtendsFrom = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1ExternalIdProperty = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1FunctionRequest = (input: FunctionRequest, context: __SerdeContext): any => { + return { + ...(input.implementedBy !== undefined && + input.implementedBy !== null && { + implementedBy: serializeAws_restJson1DataConnector(input.implementedBy, context), + }), + ...(input.requiredProperties !== undefined && + input.requiredProperties !== null && { + requiredProperties: 
serializeAws_restJson1RequiredProperties(input.requiredProperties, context), + }), + ...(input.scope !== undefined && input.scope !== null && { scope: input.scope }), + }; +}; + +const serializeAws_restJson1FunctionsRequest = ( + input: { [key: string]: FunctionRequest }, + context: __SerdeContext +): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_restJson1FunctionRequest(value, context), + }; + }, {}); +}; + +const serializeAws_restJson1InterpolationParameters = ( + input: InterpolationParameters, + context: __SerdeContext +): any => { + return { + ...(input.interpolationType !== undefined && + input.interpolationType !== null && { interpolationType: input.interpolationType }), + ...(input.intervalInSeconds !== undefined && + input.intervalInSeconds !== null && { intervalInSeconds: input.intervalInSeconds }), + }; +}; + +const serializeAws_restJson1LambdaFunction = (input: LambdaFunction, context: __SerdeContext): any => { + return { + ...(input.arn !== undefined && input.arn !== null && { arn: input.arn }), + }; +}; + +const serializeAws_restJson1ListComponentTypesFilter = ( + input: ListComponentTypesFilter, + context: __SerdeContext +): any => { + return ListComponentTypesFilter.visit(input, { + extendsFrom: (value) => ({ extendsFrom: value }), + isAbstract: (value) => ({ isAbstract: value }), + namespace: (value) => ({ namespace: value }), + _: (name, value) => ({ name: value } as any), + }); +}; + +const serializeAws_restJson1ListComponentTypesFilters = ( + input: ListComponentTypesFilter[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1ListComponentTypesFilter(entry, context); + }); +}; + +const serializeAws_restJson1ListEntitiesFilter = (input: ListEntitiesFilter, context: __SerdeContext): any => { + return ListEntitiesFilter.visit(input, { + componentTypeId: (value) => ({ componentTypeId: value }), + parentEntityId: (value) => ({ parentEntityId: value }), + _: (name, value) => ({ name: value } as any), + }); +}; + +const serializeAws_restJson1ListEntitiesFilters = (input: ListEntitiesFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1ListEntitiesFilter(entry, context); + }); +}; + +const serializeAws_restJson1ParentEntityUpdateRequest = ( + input: ParentEntityUpdateRequest, + context: __SerdeContext +): any => { + return { + ...(input.parentEntityId !== undefined && + input.parentEntityId !== null && { parentEntityId: input.parentEntityId }), + ...(input.updateType !== undefined && input.updateType !== null && { updateType: input.updateType }), + }; +}; + +const serializeAws_restJson1PropertyDefinitionRequest = ( + input: PropertyDefinitionRequest, + context: __SerdeContext +): any => { + return { + ...(input.configuration !== undefined && + input.configuration !== null && { + configuration: serializeAws_restJson1Configuration(input.configuration, context), + }), + ...(input.dataType !== undefined && + input.dataType !== null && { dataType: serializeAws_restJson1DataType(input.dataType, context) }), + ...(input.defaultValue !== undefined && + input.defaultValue !== null && { defaultValue: serializeAws_restJson1DataValue(input.defaultValue, context) }), + 
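The property-value serializers a little further down in this hunk (serializeAws_restJson1PropertyValue and serializeAws_restJson1PropertyValueEntry) send timestamps as epoch seconds via Math.round(timestamp.getTime() / 1000). A hedged sketch of a request that would exercise them, assuming the BatchPutPropertyValuesCommand added elsewhere in this patch and hypothetical entity and property names:

import { BatchPutPropertyValuesCommand } from "@aws-sdk/client-iottwinmaker";

const command = new BatchPutPropertyValuesCommand({
  workspaceId: "my-workspace",
  entries: [
    {
      entityPropertyReference: {
        entityId: "my-entity",
        componentName: "my-component",
        propertyName: "temperature",
      },
      // new Date("2021-11-30T00:00:00Z") is serialized as 1638230400 (epoch seconds).
      propertyValues: [{ timestamp: new Date("2021-11-30T00:00:00Z"), value: { doubleValue: 21.5 } }],
    },
  ],
});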
...(input.isExternalId !== undefined && input.isExternalId !== null && { isExternalId: input.isExternalId }), + ...(input.isRequiredInEntity !== undefined && + input.isRequiredInEntity !== null && { isRequiredInEntity: input.isRequiredInEntity }), + ...(input.isStoredExternally !== undefined && + input.isStoredExternally !== null && { isStoredExternally: input.isStoredExternally }), + ...(input.isTimeSeries !== undefined && input.isTimeSeries !== null && { isTimeSeries: input.isTimeSeries }), + }; +}; + +const serializeAws_restJson1PropertyDefinitionsRequest = ( + input: { [key: string]: PropertyDefinitionRequest }, + context: __SerdeContext +): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_restJson1PropertyDefinitionRequest(value, context), + }; + }, {}); +}; + +const serializeAws_restJson1PropertyFilter = (input: PropertyFilter, context: __SerdeContext): any => { + return { + ...(input.operator !== undefined && input.operator !== null && { operator: input.operator }), + ...(input.propertyName !== undefined && input.propertyName !== null && { propertyName: input.propertyName }), + ...(input.value !== undefined && + input.value !== null && { value: serializeAws_restJson1DataValue(input.value, context) }), + }; +}; + +const serializeAws_restJson1PropertyFilters = (input: PropertyFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1PropertyFilter(entry, context); + }); +}; + +const serializeAws_restJson1PropertyRequest = (input: PropertyRequest, context: __SerdeContext): any => { + return { + ...(input.definition !== undefined && + input.definition !== null && { + definition: serializeAws_restJson1PropertyDefinitionRequest(input.definition, context), + }), + ...(input.updateType !== undefined && input.updateType !== null && { updateType: input.updateType }), + ...(input.value !== undefined && + input.value !== null && { value: serializeAws_restJson1DataValue(input.value, context) }), + }; +}; + +const serializeAws_restJson1PropertyRequests = ( + input: { [key: string]: PropertyRequest }, + context: __SerdeContext +): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_restJson1PropertyRequest(value, context), + }; + }, {}); +}; + +const serializeAws_restJson1PropertyValue = (input: PropertyValue, context: __SerdeContext): any => { + return { + ...(input.timestamp !== undefined && + input.timestamp !== null && { timestamp: Math.round(input.timestamp.getTime() / 1000) }), + ...(input.value !== undefined && + input.value !== null && { value: serializeAws_restJson1DataValue(input.value, context) }), + }; +}; + +const serializeAws_restJson1PropertyValueEntry = (input: PropertyValueEntry, context: __SerdeContext): any => { + return { + ...(input.entityPropertyReference !== undefined && + input.entityPropertyReference !== null && { + entityPropertyReference: serializeAws_restJson1EntityPropertyReference(input.entityPropertyReference, context), + }), + ...(input.propertyValues !== undefined && + input.propertyValues !== null && { + propertyValues: serializeAws_restJson1PropertyValues(input.propertyValues, context), + }), + }; +}; + +const 
serializeAws_restJson1PropertyValues = (input: PropertyValue[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1PropertyValue(entry, context); + }); +}; + +const serializeAws_restJson1Relationship = (input: Relationship, context: __SerdeContext): any => { + return { + ...(input.relationshipType !== undefined && + input.relationshipType !== null && { relationshipType: input.relationshipType }), + ...(input.targetComponentTypeId !== undefined && + input.targetComponentTypeId !== null && { targetComponentTypeId: input.targetComponentTypeId }), + }; +}; + +const serializeAws_restJson1RelationshipValue = (input: RelationshipValue, context: __SerdeContext): any => { + return { + ...(input.targetComponentName !== undefined && + input.targetComponentName !== null && { targetComponentName: input.targetComponentName }), + ...(input.targetEntityId !== undefined && + input.targetEntityId !== null && { targetEntityId: input.targetEntityId }), + }; +}; + +const serializeAws_restJson1RequiredProperties = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1SceneCapabilities = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1SelectedPropertyList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1TagMap = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const deserializeAws_restJson1BatchPutPropertyError = (output: any, context: __SerdeContext): BatchPutPropertyError => { + return { + entry: + output.entry !== undefined && output.entry !== null + ? deserializeAws_restJson1PropertyValueEntry(output.entry, context) + : undefined, + errorCode: __expectString(output.errorCode), + errorMessage: __expectString(output.errorMessage), + } as any; +}; + +const deserializeAws_restJson1BatchPutPropertyErrorEntry = ( + output: any, + context: __SerdeContext +): BatchPutPropertyErrorEntry => { + return { + errors: + output.errors !== undefined && output.errors !== null + ? deserializeAws_restJson1Errors(output.errors, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ComponentResponse = (output: any, context: __SerdeContext): ComponentResponse => { + return { + componentName: __expectString(output.componentName), + componentTypeId: __expectString(output.componentTypeId), + definedIn: __expectString(output.definedIn), + description: __expectString(output.description), + properties: + output.properties !== undefined && output.properties !== null + ? deserializeAws_restJson1PropertyResponses(output.properties, context) + : undefined, + status: + output.status !== undefined && output.status !== null + ? 
deserializeAws_restJson1Status(output.status, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ComponentsMap = ( + output: any, + context: __SerdeContext +): { [key: string]: ComponentResponse } => { + return Object.entries(output).reduce((acc: { [key: string]: ComponentResponse }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_restJson1ComponentResponse(value, context), + }; + }, {}); +}; + +const deserializeAws_restJson1ComponentTypeSummaries = ( + output: any, + context: __SerdeContext +): ComponentTypeSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ComponentTypeSummary(entry, context); + }); +}; + +const deserializeAws_restJson1ComponentTypeSummary = (output: any, context: __SerdeContext): ComponentTypeSummary => { + return { + arn: __expectString(output.arn), + componentTypeId: __expectString(output.componentTypeId), + creationDateTime: + output.creationDateTime !== undefined && output.creationDateTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.creationDateTime))) + : undefined, + description: __expectString(output.description), + status: + output.status !== undefined && output.status !== null + ? deserializeAws_restJson1Status(output.status, context) + : undefined, + updateDateTime: + output.updateDateTime !== undefined && output.updateDateTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.updateDateTime))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1Configuration = (output: any, context: __SerdeContext): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1DataConnector = (output: any, context: __SerdeContext): DataConnector => { + return { + isNative: __expectBoolean(output.isNative), + lambda: + output.lambda !== undefined && output.lambda !== null + ? deserializeAws_restJson1LambdaFunction(output.lambda, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1DataType = (output: any, context: __SerdeContext): DataType => { + return { + allowedValues: + output.allowedValues !== undefined && output.allowedValues !== null + ? deserializeAws_restJson1DataValueList(output.allowedValues, context) + : undefined, + nestedType: + output.nestedType !== undefined && output.nestedType !== null + ? deserializeAws_restJson1DataType(output.nestedType, context) + : undefined, + relationship: + output.relationship !== undefined && output.relationship !== null + ? deserializeAws_restJson1Relationship(output.relationship, context) + : undefined, + type: __expectString(output.type), + unitOfMeasure: __expectString(output.unitOfMeasure), + } as any; +}; + +const deserializeAws_restJson1DataValue = (output: any, context: __SerdeContext): DataValue => { + return { + booleanValue: __expectBoolean(output.booleanValue), + doubleValue: __limitedParseDouble(output.doubleValue), + expression: __expectString(output.expression), + integerValue: __expectInt32(output.integerValue), + listValue: + output.listValue !== undefined && output.listValue !== null + ? 
deserializeAws_restJson1DataValueList(output.listValue, context) + : undefined, + longValue: __expectLong(output.longValue), + mapValue: + output.mapValue !== undefined && output.mapValue !== null + ? deserializeAws_restJson1DataValueMap(output.mapValue, context) + : undefined, + relationshipValue: + output.relationshipValue !== undefined && output.relationshipValue !== null + ? deserializeAws_restJson1RelationshipValue(output.relationshipValue, context) + : undefined, + stringValue: __expectString(output.stringValue), + } as any; +}; + +const deserializeAws_restJson1DataValueList = (output: any, context: __SerdeContext): DataValue[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1DataValue(entry, context); + }); +}; + +const deserializeAws_restJson1DataValueMap = (output: any, context: __SerdeContext): { [key: string]: DataValue } => { + return Object.entries(output).reduce((acc: { [key: string]: DataValue }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_restJson1DataValue(value, context), + }; + }, {}); +}; + +const deserializeAws_restJson1EntityPropertyReference = ( + output: any, + context: __SerdeContext +): EntityPropertyReference => { + return { + componentName: __expectString(output.componentName), + entityId: __expectString(output.entityId), + externalIdProperty: + output.externalIdProperty !== undefined && output.externalIdProperty !== null + ? deserializeAws_restJson1ExternalIdProperty(output.externalIdProperty, context) + : undefined, + propertyName: __expectString(output.propertyName), + } as any; +}; + +const deserializeAws_restJson1EntitySummaries = (output: any, context: __SerdeContext): EntitySummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1EntitySummary(entry, context); + }); +}; + +const deserializeAws_restJson1EntitySummary = (output: any, context: __SerdeContext): EntitySummary => { + return { + arn: __expectString(output.arn), + creationDateTime: + output.creationDateTime !== undefined && output.creationDateTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.creationDateTime))) + : undefined, + description: __expectString(output.description), + entityId: __expectString(output.entityId), + entityName: __expectString(output.entityName), + hasChildEntities: __expectBoolean(output.hasChildEntities), + parentEntityId: __expectString(output.parentEntityId), + status: + output.status !== undefined && output.status !== null + ? deserializeAws_restJson1Status(output.status, context) + : undefined, + updateDateTime: + output.updateDateTime !== undefined && output.updateDateTime !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.updateDateTime))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ErrorDetails = (output: any, context: __SerdeContext): ErrorDetails => { + return { + code: __expectString(output.code), + message: __expectString(output.message), + } as any; +}; + +const deserializeAws_restJson1ErrorEntries = (output: any, context: __SerdeContext): BatchPutPropertyErrorEntry[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1BatchPutPropertyErrorEntry(entry, context); + }); +}; + +const deserializeAws_restJson1Errors = (output: any, context: __SerdeContext): BatchPutPropertyError[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1BatchPutPropertyError(entry, context); + }); +}; + +const deserializeAws_restJson1ExtendsFrom = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1ExternalIdProperty = ( + output: any, + context: __SerdeContext +): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1FunctionResponse = (output: any, context: __SerdeContext): FunctionResponse => { + return { + implementedBy: + output.implementedBy !== undefined && output.implementedBy !== null + ? deserializeAws_restJson1DataConnector(output.implementedBy, context) + : undefined, + isInherited: __expectBoolean(output.isInherited), + requiredProperties: + output.requiredProperties !== undefined && output.requiredProperties !== null + ? deserializeAws_restJson1RequiredProperties(output.requiredProperties, context) + : undefined, + scope: __expectString(output.scope), + } as any; +}; + +const deserializeAws_restJson1FunctionsResponse = ( + output: any, + context: __SerdeContext +): { [key: string]: FunctionResponse } => { + return Object.entries(output).reduce((acc: { [key: string]: FunctionResponse }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_restJson1FunctionResponse(value, context), + }; + }, {}); +}; + +const deserializeAws_restJson1LambdaFunction = (output: any, context: __SerdeContext): LambdaFunction => { + return { + arn: __expectString(output.arn), + } as any; +}; + +const deserializeAws_restJson1PropertyDefinitionResponse = ( + output: any, + context: __SerdeContext +): PropertyDefinitionResponse => { + return { + configuration: + output.configuration !== undefined && output.configuration !== null + ? deserializeAws_restJson1Configuration(output.configuration, context) + : undefined, + dataType: + output.dataType !== undefined && output.dataType !== null + ? deserializeAws_restJson1DataType(output.dataType, context) + : undefined, + defaultValue: + output.defaultValue !== undefined && output.defaultValue !== null + ? 
deserializeAws_restJson1DataValue(output.defaultValue, context) + : undefined, + isExternalId: __expectBoolean(output.isExternalId), + isFinal: __expectBoolean(output.isFinal), + isImported: __expectBoolean(output.isImported), + isInherited: __expectBoolean(output.isInherited), + isRequiredInEntity: __expectBoolean(output.isRequiredInEntity), + isStoredExternally: __expectBoolean(output.isStoredExternally), + isTimeSeries: __expectBoolean(output.isTimeSeries), + } as any; +}; + +const deserializeAws_restJson1PropertyDefinitionsResponse = ( + output: any, + context: __SerdeContext +): { [key: string]: PropertyDefinitionResponse } => { + return Object.entries(output).reduce( + (acc: { [key: string]: PropertyDefinitionResponse }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_restJson1PropertyDefinitionResponse(value, context), + }; + }, + {} + ); +}; + +const deserializeAws_restJson1PropertyLatestValue = (output: any, context: __SerdeContext): PropertyLatestValue => { + return { + propertyReference: + output.propertyReference !== undefined && output.propertyReference !== null + ? deserializeAws_restJson1EntityPropertyReference(output.propertyReference, context) + : undefined, + propertyValue: + output.propertyValue !== undefined && output.propertyValue !== null + ? deserializeAws_restJson1DataValue(output.propertyValue, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PropertyLatestValueMap = ( + output: any, + context: __SerdeContext +): { [key: string]: PropertyLatestValue } => { + return Object.entries(output).reduce((acc: { [key: string]: PropertyLatestValue }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_restJson1PropertyLatestValue(value, context), + }; + }, {}); +}; + +const deserializeAws_restJson1PropertyResponse = (output: any, context: __SerdeContext): PropertyResponse => { + return { + definition: + output.definition !== undefined && output.definition !== null + ? deserializeAws_restJson1PropertyDefinitionResponse(output.definition, context) + : undefined, + value: + output.value !== undefined && output.value !== null + ? deserializeAws_restJson1DataValue(output.value, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PropertyResponses = ( + output: any, + context: __SerdeContext +): { [key: string]: PropertyResponse } => { + return Object.entries(output).reduce((acc: { [key: string]: PropertyResponse }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: deserializeAws_restJson1PropertyResponse(value, context), + }; + }, {}); +}; + +const deserializeAws_restJson1PropertyValue = (output: any, context: __SerdeContext): PropertyValue => { + return { + timestamp: + output.timestamp !== undefined && output.timestamp !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.timestamp))) + : undefined, + value: + output.value !== undefined && output.value !== null + ? deserializeAws_restJson1DataValue(output.value, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PropertyValueEntry = (output: any, context: __SerdeContext): PropertyValueEntry => { + return { + entityPropertyReference: + output.entityPropertyReference !== undefined && output.entityPropertyReference !== null + ? 
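The map-shaped response members in this hunk (PropertyDefinitionsResponse, PropertyLatestValueMap, PropertyResponses, and the like) are all rebuilt with the same Object.entries(...).reduce idiom, skipping null entries. An illustrative, stand-alone reduction of that pattern:

const deserializeStringMap = (output: Record<string, unknown>): { [key: string]: string } =>
  Object.entries(output ?? {}).reduce((acc: { [key: string]: string }, [key, value]) => {
    if (value === null) {
      return acc; // null entries are dropped rather than copied
    }
    return { ...acc, [key]: String(value) };
  }, {});

// deserializeStringMap({ a: "1", b: null }) -> { a: "1" }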
deserializeAws_restJson1EntityPropertyReference(output.entityPropertyReference, context) + : undefined, + propertyValues: + output.propertyValues !== undefined && output.propertyValues !== null + ? deserializeAws_restJson1PropertyValues(output.propertyValues, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PropertyValueHistory = (output: any, context: __SerdeContext): PropertyValueHistory => { + return { + entityPropertyReference: + output.entityPropertyReference !== undefined && output.entityPropertyReference !== null + ? deserializeAws_restJson1EntityPropertyReference(output.entityPropertyReference, context) + : undefined, + values: + output.values !== undefined && output.values !== null + ? deserializeAws_restJson1Values(output.values, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PropertyValueList = (output: any, context: __SerdeContext): PropertyValueHistory[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PropertyValueHistory(entry, context); + }); +}; + +const deserializeAws_restJson1PropertyValues = (output: any, context: __SerdeContext): PropertyValue[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PropertyValue(entry, context); + }); +}; + +const deserializeAws_restJson1Relationship = (output: any, context: __SerdeContext): Relationship => { + return { + relationshipType: __expectString(output.relationshipType), + targetComponentTypeId: __expectString(output.targetComponentTypeId), + } as any; +}; + +const deserializeAws_restJson1RelationshipValue = (output: any, context: __SerdeContext): RelationshipValue => { + return { + targetComponentName: __expectString(output.targetComponentName), + targetEntityId: __expectString(output.targetEntityId), + } as any; +}; + +const deserializeAws_restJson1RequiredProperties = (output: any, context: __SerdeContext): string[] => { + const uniqueValues = new Set(); + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + const parsedEntry = __expectString(entry) as any; + if (uniqueValues.has(parsedEntry)) { + throw new TypeError('All elements of the set "com.amazonaws.iottwinmaker#RequiredProperties" must be unique.'); + } else { + uniqueValues.add(parsedEntry); + return parsedEntry; + } + }); +}; + +const deserializeAws_restJson1SceneCapabilities = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1SceneSummaries = (output: any, context: __SerdeContext): SceneSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1SceneSummary(entry, context); + }); +}; + +const deserializeAws_restJson1SceneSummary = (output: any, context: __SerdeContext): SceneSummary => { + return { + arn: __expectString(output.arn), + contentLocation: __expectString(output.contentLocation), + creationDateTime: + output.creationDateTime !== undefined && output.creationDateTime !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.creationDateTime))) + : undefined, + description: __expectString(output.description), + sceneId: __expectString(output.sceneId), + updateDateTime: + output.updateDateTime !== undefined && output.updateDateTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.updateDateTime))) + : undefined, + } as any; +}; + +const deserializeAws_restJson1Status = (output: any, context: __SerdeContext): Status => { + return { + error: + output.error !== undefined && output.error !== null + ? deserializeAws_restJson1ErrorDetails(output.error, context) + : undefined, + state: __expectString(output.state), + } as any; +}; + +const deserializeAws_restJson1TagMap = (output: any, context: __SerdeContext): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1Values = (output: any, context: __SerdeContext): PropertyValue[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PropertyValue(entry, context); + }); +}; + +const deserializeAws_restJson1WorkspaceSummaries = (output: any, context: __SerdeContext): WorkspaceSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1WorkspaceSummary(entry, context); + }); +}; + +const deserializeAws_restJson1WorkspaceSummary = (output: any, context: __SerdeContext): WorkspaceSummary => { + return { + arn: __expectString(output.arn), + creationDateTime: + output.creationDateTime !== undefined && output.creationDateTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.creationDateTime))) + : undefined, + description: __expectString(output.description), + updateDateTime: + output.updateDateTime !== undefined && output.updateDateTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.updateDateTime))) + : undefined, + workspaceId: __expectString(output.workspaceId), + } as any; +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. +const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. 
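deserializeMetadata above is what populates $metadata on every command output (HTTP status code plus the request IDs from the x-amzn-requestid / x-amz-id-2 headers). A small usage sketch, assuming the ListWorkspacesCommand added elsewhere in this patch:

import { IoTTwinMakerClient, ListWorkspacesCommand } from "@aws-sdk/client-iottwinmaker";

const logRequestMetadata = async () => {
  const client = new IoTTwinMakerClient({ region: "us-east-1" });
  const response = await client.send(new ListWorkspacesCommand({}));
  // Useful when filing support cases or correlating with server-side logs.
  console.log(response.$metadata.httpStatusCode, response.$metadata.requestId);
};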
+const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const isSerializableHeaderValue = (value: any): boolean => + value !== undefined && + value !== null && + value !== "" && + (!Object.getOwnPropertyNames(value).includes("length") || value.length != 0) && + (!Object.getOwnPropertyNames(value).includes("size") || value.size != 0); + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. + */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-iottwinmaker/src/runtimeConfig.browser.ts b/clients/client-iottwinmaker/src/runtimeConfig.browser.ts new file mode 100644 index 000000000000..92a0062d1ffc --- /dev/null +++ b/clients/client-iottwinmaker/src/runtimeConfig.browser.ts @@ -0,0 +1,44 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { Sha256 } from "@aws-crypto/sha256-browser"; +import { DEFAULT_USE_DUALSTACK_ENDPOINT, DEFAULT_USE_FIPS_ENDPOINT } from "@aws-sdk/config-resolver"; +import { FetchHttpHandler, streamCollector } from "@aws-sdk/fetch-http-handler"; +import { invalidProvider } from "@aws-sdk/invalid-dependency"; +import { DEFAULT_MAX_ATTEMPTS, DEFAULT_RETRY_MODE } from "@aws-sdk/middleware-retry"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-browser"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-browser"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-browser"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-browser"; +import { IoTTwinMakerClientConfig } from "./IoTTwinMakerClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: IoTTwinMakerClientConfig) => { + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "browser", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? ((_: unknown) => () => Promise.reject(new Error("Credential is missing"))), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? 
DEFAULT_MAX_ATTEMPTS, + region: config?.region ?? invalidProvider("Region is missing"), + requestHandler: config?.requestHandler ?? new FetchHttpHandler(), + retryMode: config?.retryMode ?? (() => Promise.resolve(DEFAULT_RETRY_MODE)), + sha256: config?.sha256 ?? Sha256, + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? (() => Promise.resolve(DEFAULT_USE_DUALSTACK_ENDPOINT)), + useFipsEndpoint: config?.useFipsEndpoint ?? (() => Promise.resolve(DEFAULT_USE_FIPS_ENDPOINT)), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? toUtf8, + }; +}; diff --git a/clients/client-iottwinmaker/src/runtimeConfig.native.ts b/clients/client-iottwinmaker/src/runtimeConfig.native.ts new file mode 100644 index 000000000000..566a82c5359e --- /dev/null +++ b/clients/client-iottwinmaker/src/runtimeConfig.native.ts @@ -0,0 +1,17 @@ +import { Sha256 } from "@aws-crypto/sha256-js"; + +import { IoTTwinMakerClientConfig } from "./IoTTwinMakerClient"; +import { getRuntimeConfig as getBrowserRuntimeConfig } from "./runtimeConfig.browser"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: IoTTwinMakerClientConfig) => { + const browserDefaults = getBrowserRuntimeConfig(config); + return { + ...browserDefaults, + ...config, + runtime: "react-native", + sha256: config?.sha256 ?? Sha256, + }; +}; diff --git a/clients/client-iottwinmaker/src/runtimeConfig.shared.ts b/clients/client-iottwinmaker/src/runtimeConfig.shared.ts new file mode 100644 index 000000000000..c6e40fa03299 --- /dev/null +++ b/clients/client-iottwinmaker/src/runtimeConfig.shared.ts @@ -0,0 +1,17 @@ +import { Logger as __Logger } from "@aws-sdk/types"; +import { parseUrl } from "@aws-sdk/url-parser"; + +import { defaultRegionInfoProvider } from "./endpoints"; +import { IoTTwinMakerClientConfig } from "./IoTTwinMakerClient"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: IoTTwinMakerClientConfig) => ({ + apiVersion: "2021-11-29", + disableHostPrefix: config?.disableHostPrefix ?? false, + logger: config?.logger ?? ({} as __Logger), + regionInfoProvider: config?.regionInfoProvider ?? defaultRegionInfoProvider, + serviceId: config?.serviceId ?? "IoTTwinMaker", + urlParser: config?.urlParser ?? 
parseUrl, +}); diff --git a/clients/client-iottwinmaker/src/runtimeConfig.ts b/clients/client-iottwinmaker/src/runtimeConfig.ts new file mode 100644 index 000000000000..ccd3889b0691 --- /dev/null +++ b/clients/client-iottwinmaker/src/runtimeConfig.ts @@ -0,0 +1,53 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { decorateDefaultCredentialProvider } from "@aws-sdk/client-sts"; +import { + NODE_REGION_CONFIG_FILE_OPTIONS, + NODE_REGION_CONFIG_OPTIONS, + NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS, + NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS, +} from "@aws-sdk/config-resolver"; +import { defaultProvider as credentialDefaultProvider } from "@aws-sdk/credential-provider-node"; +import { Hash } from "@aws-sdk/hash-node"; +import { NODE_MAX_ATTEMPT_CONFIG_OPTIONS, NODE_RETRY_MODE_CONFIG_OPTIONS } from "@aws-sdk/middleware-retry"; +import { loadConfig as loadNodeConfig } from "@aws-sdk/node-config-provider"; +import { NodeHttpHandler, streamCollector } from "@aws-sdk/node-http-handler"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-node"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-node"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-node"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-node"; +import { IoTTwinMakerClientConfig } from "./IoTTwinMakerClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { emitWarningIfUnsupportedVersion } from "@aws-sdk/smithy-client"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: IoTTwinMakerClientConfig) => { + emitWarningIfUnsupportedVersion(process.version); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "node", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? decorateDefaultCredentialProvider(credentialDefaultProvider), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? loadNodeConfig(NODE_MAX_ATTEMPT_CONFIG_OPTIONS), + region: config?.region ?? loadNodeConfig(NODE_REGION_CONFIG_OPTIONS, NODE_REGION_CONFIG_FILE_OPTIONS), + requestHandler: config?.requestHandler ?? new NodeHttpHandler(), + retryMode: config?.retryMode ?? loadNodeConfig(NODE_RETRY_MODE_CONFIG_OPTIONS), + sha256: config?.sha256 ?? Hash.bind(null, "sha256"), + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? loadNodeConfig(NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS), + useFipsEndpoint: config?.useFipsEndpoint ?? loadNodeConfig(NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
toUtf8, + }; +}; diff --git a/clients/client-iottwinmaker/tsconfig.es.json b/clients/client-iottwinmaker/tsconfig.es.json new file mode 100644 index 000000000000..4c72364cd1a0 --- /dev/null +++ b/clients/client-iottwinmaker/tsconfig.es.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "es5", + "module": "esnext", + "moduleResolution": "node", + "lib": ["dom", "es5", "es2015.promise", "es2015.collection", "es2015.iterable", "es2015.symbol.wellknown"], + "outDir": "dist-es" + } +} diff --git a/clients/client-iottwinmaker/tsconfig.json b/clients/client-iottwinmaker/tsconfig.json new file mode 100644 index 000000000000..093039289c53 --- /dev/null +++ b/clients/client-iottwinmaker/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "rootDir": "./src", + "alwaysStrict": true, + "target": "ES2018", + "module": "commonjs", + "strict": true, + "downlevelIteration": true, + "importHelpers": true, + "noEmitHelpers": true, + "incremental": true, + "resolveJsonModule": true, + "esModuleInterop": true, + "outDir": "dist-cjs", + "removeComments": true + }, + "typedocOptions": { + "exclude": ["**/node_modules/**", "**/*.spec.ts", "**/protocols/*.ts", "**/e2e/*.ts", "**/endpoints.ts"], + "excludeNotExported": true, + "excludePrivate": true, + "hideGenerator": true, + "ignoreCompilerErrors": true, + "includeDeclarations": true, + "stripInternal": true, + "readme": "README.md", + "mode": "file", + "out": "docs", + "theme": "minimal", + "plugin": ["@aws-sdk/service-client-documentation-generator"] + }, + "exclude": ["test/**/*"] +} diff --git a/clients/client-iottwinmaker/tsconfig.types.json b/clients/client-iottwinmaker/tsconfig.types.json new file mode 100644 index 000000000000..4c3dfa7b3d25 --- /dev/null +++ b/clients/client-iottwinmaker/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "removeComments": false, + "declaration": true, + "declarationDir": "dist-types", + "emitDeclarationOnly": true + }, + "exclude": ["test/**/*", "dist-types/**/*"] +} diff --git a/clients/client-kafka/src/Kafka.ts b/clients/client-kafka/src/Kafka.ts index 91f60352e6e7..fbc53430c76b 100644 --- a/clients/client-kafka/src/Kafka.ts +++ b/clients/client-kafka/src/Kafka.ts @@ -15,6 +15,11 @@ import { CreateClusterCommandInput, CreateClusterCommandOutput, } from "./commands/CreateClusterCommand"; +import { + CreateClusterV2Command, + CreateClusterV2CommandInput, + CreateClusterV2CommandOutput, +} from "./commands/CreateClusterV2Command"; import { CreateConfigurationCommand, CreateConfigurationCommandInput, @@ -40,6 +45,11 @@ import { DescribeClusterOperationCommandInput, DescribeClusterOperationCommandOutput, } from "./commands/DescribeClusterOperationCommand"; +import { + DescribeClusterV2Command, + DescribeClusterV2CommandInput, + DescribeClusterV2CommandOutput, +} from "./commands/DescribeClusterV2Command"; import { DescribeConfigurationCommand, DescribeConfigurationCommandInput, @@ -70,6 +80,11 @@ import { ListClustersCommandInput, ListClustersCommandOutput, } from "./commands/ListClustersCommand"; +import { + ListClustersV2Command, + ListClustersV2CommandInput, + ListClustersV2CommandOutput, +} from "./commands/ListClustersV2Command"; import { ListConfigurationRevisionsCommand, ListConfigurationRevisionsCommandInput, @@ -254,6 +269,38 @@ export class Kafka extends KafkaClient { } } + /** + *

Creates a new MSK cluster.
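For illustration only (editor addition, not part of the generated client): creating a serverless cluster through the aggregated Kafka client. The region, subnet, and security-group values are placeholders, and the `Iam: { Enabled: true }` shape is assumed from the pre-existing MSK `Iam` model, which is not shown in this hunk.

```typescript
// Hypothetical usage sketch for the new createClusterV2 API (serverless variant).
// Assumes an async/ESM context; all identifiers below are placeholders.
import { Kafka } from "@aws-sdk/client-kafka";

const kafka = new Kafka({ region: "us-east-1" });

const response = await kafka.createClusterV2({
  ClusterName: "example-serverless-cluster",
  Serverless: {
    VpcConfigs: [
      {
        SubnetIds: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"],
        SecurityGroupIds: ["sg-0123456789abcdef0"],
      },
    ],
    // The Iam shape ({ Enabled: boolean }) is assumed from the existing MSK model.
    ClientAuthentication: { Sasl: { Iam: { Enabled: true } } },
  },
  Tags: { team: "data-platform" },
});

console.log(response.ClusterArn, response.ClusterType, response.State);
```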
                              + */ + public createClusterV2( + args: CreateClusterV2CommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createClusterV2( + args: CreateClusterV2CommandInput, + cb: (err: any, data?: CreateClusterV2CommandOutput) => void + ): void; + public createClusterV2( + args: CreateClusterV2CommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateClusterV2CommandOutput) => void + ): void; + public createClusterV2( + args: CreateClusterV2CommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateClusterV2CommandOutput) => void), + cb?: (err: any, data?: CreateClusterV2CommandOutput) => void + ): Promise | void { + const command = new CreateClusterV2Command(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Creates a new MSK configuration.
                              */ @@ -414,6 +461,38 @@ export class Kafka extends KafkaClient { } } + /** + *

Returns a description of the MSK cluster whose Amazon Resource Name (ARN) is specified in the request.
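A minimal sketch (editor addition) of the bare-bones command form of the same call; the cluster ARN and region are placeholders.

```typescript
// Hypothetical usage sketch for DescribeClusterV2: fetch the new Cluster shape by ARN.
// Assumes an async/ESM context.
import { KafkaClient, DescribeClusterV2Command } from "@aws-sdk/client-kafka";

const client = new KafkaClient({ region: "us-east-1" });
const { ClusterInfo } = await client.send(
  new DescribeClusterV2Command({
    ClusterArn: "arn:aws:kafka:us-east-1:111122223333:cluster/example/00000000-0000-0000-0000-000000000000-1",
  })
);
console.log(ClusterInfo?.ClusterType, ClusterInfo?.State);
```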
                              + */ + public describeClusterV2( + args: DescribeClusterV2CommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeClusterV2( + args: DescribeClusterV2CommandInput, + cb: (err: any, data?: DescribeClusterV2CommandOutput) => void + ): void; + public describeClusterV2( + args: DescribeClusterV2CommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeClusterV2CommandOutput) => void + ): void; + public describeClusterV2( + args: DescribeClusterV2CommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeClusterV2CommandOutput) => void), + cb?: (err: any, data?: DescribeClusterV2CommandOutput) => void + ): Promise | void { + const command = new DescribeClusterV2Command(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Returns a description of this MSK configuration.
                              */ @@ -603,6 +682,38 @@ export class Kafka extends KafkaClient { } } + /** + *

Returns a list of all the MSK clusters in the current Region.
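A minimal sketch (editor addition) that drains all pages with the `paginateListClustersV2` paginator added in this change, assuming the paginator is re-exported from the package root as is conventional for these clients.

```typescript
// Hypothetical usage sketch: iterate over every serverless cluster, page by page.
// Assumes an async/ESM context.
import { KafkaClient, paginateListClustersV2 } from "@aws-sdk/client-kafka";

const client = new KafkaClient({ region: "us-east-1" });
for await (const page of paginateListClustersV2({ client, pageSize: 25 }, { ClusterTypeFilter: "SERVERLESS" })) {
  for (const cluster of page.ClusterInfoList ?? []) {
    console.log(cluster.ClusterName, cluster.State);
  }
}
```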
                              + */ + public listClustersV2( + args: ListClustersV2CommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listClustersV2( + args: ListClustersV2CommandInput, + cb: (err: any, data?: ListClustersV2CommandOutput) => void + ): void; + public listClustersV2( + args: ListClustersV2CommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListClustersV2CommandOutput) => void + ): void; + public listClustersV2( + args: ListClustersV2CommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListClustersV2CommandOutput) => void), + cb?: (err: any, data?: ListClustersV2CommandOutput) => void + ): Promise | void { + const command = new ListClustersV2Command(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

Returns a list of all the MSK configurations in this Region.
                              */ diff --git a/clients/client-kafka/src/KafkaClient.ts b/clients/client-kafka/src/KafkaClient.ts index c5a4feaa5070..6474ea5eaf5f 100644 --- a/clients/client-kafka/src/KafkaClient.ts +++ b/clients/client-kafka/src/KafkaClient.ts @@ -58,6 +58,7 @@ import { BatchDisassociateScramSecretCommandOutput, } from "./commands/BatchDisassociateScramSecretCommand"; import { CreateClusterCommandInput, CreateClusterCommandOutput } from "./commands/CreateClusterCommand"; +import { CreateClusterV2CommandInput, CreateClusterV2CommandOutput } from "./commands/CreateClusterV2Command"; import { CreateConfigurationCommandInput, CreateConfigurationCommandOutput, @@ -72,6 +73,7 @@ import { DescribeClusterOperationCommandInput, DescribeClusterOperationCommandOutput, } from "./commands/DescribeClusterOperationCommand"; +import { DescribeClusterV2CommandInput, DescribeClusterV2CommandOutput } from "./commands/DescribeClusterV2Command"; import { DescribeConfigurationCommandInput, DescribeConfigurationCommandOutput, @@ -93,6 +95,7 @@ import { ListClusterOperationsCommandOutput, } from "./commands/ListClusterOperationsCommand"; import { ListClustersCommandInput, ListClustersCommandOutput } from "./commands/ListClustersCommand"; +import { ListClustersV2CommandInput, ListClustersV2CommandOutput } from "./commands/ListClustersV2Command"; import { ListConfigurationRevisionsCommandInput, ListConfigurationRevisionsCommandOutput, @@ -135,17 +138,20 @@ export type ServiceInputTypes = | BatchAssociateScramSecretCommandInput | BatchDisassociateScramSecretCommandInput | CreateClusterCommandInput + | CreateClusterV2CommandInput | CreateConfigurationCommandInput | DeleteClusterCommandInput | DeleteConfigurationCommandInput | DescribeClusterCommandInput | DescribeClusterOperationCommandInput + | DescribeClusterV2CommandInput | DescribeConfigurationCommandInput | DescribeConfigurationRevisionCommandInput | GetBootstrapBrokersCommandInput | GetCompatibleKafkaVersionsCommandInput | ListClusterOperationsCommandInput | ListClustersCommandInput + | ListClustersV2CommandInput | ListConfigurationRevisionsCommandInput | ListConfigurationsCommandInput | ListKafkaVersionsCommandInput @@ -169,17 +175,20 @@ export type ServiceOutputTypes = | BatchAssociateScramSecretCommandOutput | BatchDisassociateScramSecretCommandOutput | CreateClusterCommandOutput + | CreateClusterV2CommandOutput | CreateConfigurationCommandOutput | DeleteClusterCommandOutput | DeleteConfigurationCommandOutput | DescribeClusterCommandOutput | DescribeClusterOperationCommandOutput + | DescribeClusterV2CommandOutput | DescribeConfigurationCommandOutput | DescribeConfigurationRevisionCommandOutput | GetBootstrapBrokersCommandOutput | GetCompatibleKafkaVersionsCommandOutput | ListClusterOperationsCommandOutput | ListClustersCommandOutput + | ListClustersV2CommandOutput | ListConfigurationRevisionsCommandOutput | ListConfigurationsCommandOutput | ListKafkaVersionsCommandOutput diff --git a/clients/client-kafka/src/commands/CreateClusterV2Command.ts b/clients/client-kafka/src/commands/CreateClusterV2Command.ts new file mode 100644 index 000000000000..6feae9830a2c --- /dev/null +++ b/clients/client-kafka/src/commands/CreateClusterV2Command.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + 
HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KafkaClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KafkaClient"; +import { CreateClusterV2Request, CreateClusterV2Response } from "../models/models_0"; +import { + deserializeAws_restJson1CreateClusterV2Command, + serializeAws_restJson1CreateClusterV2Command, +} from "../protocols/Aws_restJson1"; + +export interface CreateClusterV2CommandInput extends CreateClusterV2Request {} +export interface CreateClusterV2CommandOutput extends CreateClusterV2Response, __MetadataBearer {} + +/** + *

Creates a new MSK cluster.
                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaClient, CreateClusterV2Command } from "@aws-sdk/client-kafka"; // ES Modules import + * // const { KafkaClient, CreateClusterV2Command } = require("@aws-sdk/client-kafka"); // CommonJS import + * const client = new KafkaClient(config); + * const command = new CreateClusterV2Command(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateClusterV2CommandInput} for command's `input` shape. + * @see {@link CreateClusterV2CommandOutput} for command's `response` shape. + * @see {@link KafkaClientResolvedConfig | config} for KafkaClient's `config` shape. + * + */ +export class CreateClusterV2Command extends $Command< + CreateClusterV2CommandInput, + CreateClusterV2CommandOutput, + KafkaClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateClusterV2CommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KafkaClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KafkaClient"; + const commandName = "CreateClusterV2Command"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateClusterV2Request.filterSensitiveLog, + outputFilterSensitiveLog: CreateClusterV2Response.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateClusterV2CommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateClusterV2Command(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateClusterV2Command(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-kafka/src/commands/DescribeClusterV2Command.ts b/clients/client-kafka/src/commands/DescribeClusterV2Command.ts new file mode 100644 index 000000000000..f57ad0e09eac --- /dev/null +++ b/clients/client-kafka/src/commands/DescribeClusterV2Command.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KafkaClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KafkaClient"; +import { DescribeClusterV2Request, DescribeClusterV2Response } from "../models/models_0"; +import { + deserializeAws_restJson1DescribeClusterV2Command, + serializeAws_restJson1DescribeClusterV2Command, +} from 
"../protocols/Aws_restJson1"; + +export interface DescribeClusterV2CommandInput extends DescribeClusterV2Request {} +export interface DescribeClusterV2CommandOutput extends DescribeClusterV2Response, __MetadataBearer {} + +/** + *

Returns a description of the MSK cluster whose Amazon Resource Name (ARN) is specified in the request.
                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaClient, DescribeClusterV2Command } from "@aws-sdk/client-kafka"; // ES Modules import + * // const { KafkaClient, DescribeClusterV2Command } = require("@aws-sdk/client-kafka"); // CommonJS import + * const client = new KafkaClient(config); + * const command = new DescribeClusterV2Command(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeClusterV2CommandInput} for command's `input` shape. + * @see {@link DescribeClusterV2CommandOutput} for command's `response` shape. + * @see {@link KafkaClientResolvedConfig | config} for KafkaClient's `config` shape. + * + */ +export class DescribeClusterV2Command extends $Command< + DescribeClusterV2CommandInput, + DescribeClusterV2CommandOutput, + KafkaClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeClusterV2CommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KafkaClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KafkaClient"; + const commandName = "DescribeClusterV2Command"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeClusterV2Request.filterSensitiveLog, + outputFilterSensitiveLog: DescribeClusterV2Response.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeClusterV2CommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DescribeClusterV2Command(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DescribeClusterV2Command(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-kafka/src/commands/ListClustersV2Command.ts b/clients/client-kafka/src/commands/ListClustersV2Command.ts new file mode 100644 index 000000000000..65238ca31320 --- /dev/null +++ b/clients/client-kafka/src/commands/ListClustersV2Command.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KafkaClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KafkaClient"; +import { ListClustersV2Request, ListClustersV2Response } from "../models/models_0"; +import { + deserializeAws_restJson1ListClustersV2Command, + serializeAws_restJson1ListClustersV2Command, +} from 
"../protocols/Aws_restJson1"; + +export interface ListClustersV2CommandInput extends ListClustersV2Request {} +export interface ListClustersV2CommandOutput extends ListClustersV2Response, __MetadataBearer {} + +/** + *

Returns a list of all the MSK clusters in the current Region.
                              + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KafkaClient, ListClustersV2Command } from "@aws-sdk/client-kafka"; // ES Modules import + * // const { KafkaClient, ListClustersV2Command } = require("@aws-sdk/client-kafka"); // CommonJS import + * const client = new KafkaClient(config); + * const command = new ListClustersV2Command(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListClustersV2CommandInput} for command's `input` shape. + * @see {@link ListClustersV2CommandOutput} for command's `response` shape. + * @see {@link KafkaClientResolvedConfig | config} for KafkaClient's `config` shape. + * + */ +export class ListClustersV2Command extends $Command< + ListClustersV2CommandInput, + ListClustersV2CommandOutput, + KafkaClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListClustersV2CommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KafkaClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KafkaClient"; + const commandName = "ListClustersV2Command"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListClustersV2Request.filterSensitiveLog, + outputFilterSensitiveLog: ListClustersV2Response.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListClustersV2CommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListClustersV2Command(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListClustersV2Command(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-kafka/src/commands/index.ts b/clients/client-kafka/src/commands/index.ts index 1e58edca5849..6eeba814b960 100644 --- a/clients/client-kafka/src/commands/index.ts +++ b/clients/client-kafka/src/commands/index.ts @@ -1,17 +1,20 @@ export * from "./BatchAssociateScramSecretCommand"; export * from "./BatchDisassociateScramSecretCommand"; export * from "./CreateClusterCommand"; +export * from "./CreateClusterV2Command"; export * from "./CreateConfigurationCommand"; export * from "./DeleteClusterCommand"; export * from "./DeleteConfigurationCommand"; export * from "./DescribeClusterCommand"; export * from "./DescribeClusterOperationCommand"; +export * from "./DescribeClusterV2Command"; export * from "./DescribeConfigurationCommand"; export * from "./DescribeConfigurationRevisionCommand"; export * from "./GetBootstrapBrokersCommand"; export * from "./GetCompatibleKafkaVersionsCommand"; export * from "./ListClusterOperationsCommand"; export * from "./ListClustersCommand"; +export * from "./ListClustersV2Command"; export * from 
"./ListConfigurationRevisionsCommand"; export * from "./ListConfigurationsCommand"; export * from "./ListKafkaVersionsCommand"; diff --git a/clients/client-kafka/src/models/models_0.ts b/clients/client-kafka/src/models/models_0.ts index bab4d1a2c6e8..1d19408a663e 100644 --- a/clients/client-kafka/src/models/models_0.ts +++ b/clients/client-kafka/src/models/models_0.ts @@ -24,6 +24,11 @@ export namespace BrokerEBSVolumeInfo { }); } +export enum ClusterType { + PROVISIONED = "PROVISIONED", + SERVERLESS = "SERVERLESS", +} + export enum BrokerAZDistribution { DEFAULT = "DEFAULT", } @@ -456,15 +461,340 @@ export namespace BrokerLogs { }); } -export interface LoggingInfo { - BrokerLogs: BrokerLogs | undefined; +export interface LoggingInfo { + BrokerLogs: BrokerLogs | undefined; +} + +export namespace LoggingInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: LoggingInfo): any => ({ + ...obj, + }); +} + +/** + *

Indicates whether you want to turn on or turn off the JMX Exporter.
                              + */ +export interface JmxExporterInfo { + /** + *

Indicates whether you want to turn on or turn off the JMX Exporter.
                              + */ + EnabledInBroker: boolean | undefined; +} + +export namespace JmxExporterInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: JmxExporterInfo): any => ({ + ...obj, + }); +} + +/** + *

Indicates whether you want to turn on or turn off the Node Exporter.
                              + */ +export interface NodeExporterInfo { + /** + *

Indicates whether you want to turn on or turn off the Node Exporter.
                              + */ + EnabledInBroker: boolean | undefined; +} + +export namespace NodeExporterInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NodeExporterInfo): any => ({ + ...obj, + }); +} + +/** + *

Prometheus settings.
                              + */ +export interface PrometheusInfo { + /** + *

Indicates whether you want to turn on or turn off the JMX Exporter.
                              + */ + JmxExporter?: JmxExporterInfo; + + /** + *

Indicates whether you want to turn on or turn off the Node Exporter.
                              + */ + NodeExporter?: NodeExporterInfo; +} + +export namespace PrometheusInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PrometheusInfo): any => ({ + ...obj, + }); +} + +/** + *

JMX and Node monitoring for the MSK cluster.
                              + */ +export interface OpenMonitoringInfo { + /** + *

Prometheus settings.
                              + */ + Prometheus: PrometheusInfo | undefined; +} + +export namespace OpenMonitoringInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: OpenMonitoringInfo): any => ({ + ...obj, + }); +} + +/** + *

Provisioned cluster.
                              + */ +export interface Provisioned { + /** + *

Information about the brokers.
                              + */ + BrokerNodeGroupInfo: BrokerNodeGroupInfo | undefined; + + /** + *

Information about the Apache Kafka version deployed on the brokers.
                              + */ + CurrentBrokerSoftwareInfo?: BrokerSoftwareInfo; + + /** + *

Includes all client authentication information.
                              + */ + ClientAuthentication?: ClientAuthentication; + + /** + *

Includes all encryption-related information.
                              + */ + EncryptionInfo?: EncryptionInfo; + + /** + *

Specifies the level of monitoring for the MSK cluster. The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.
                              + */ + EnhancedMonitoring?: EnhancedMonitoring | string; + + /** + *

The settings for open monitoring.
                              + */ + OpenMonitoring?: OpenMonitoringInfo; + + /** + *

Log delivery information for the cluster.
                              + */ + LoggingInfo?: LoggingInfo; + + /** + *

The number of broker nodes in the cluster.
                              + */ + NumberOfBrokerNodes: number | undefined; + + /** + *

The connection string to use to connect to the Apache ZooKeeper cluster.
                              + */ + ZookeeperConnectString?: string; + + /** + *

The connection string to use to connect to the Apache ZooKeeper cluster on a TLS port.
                              + */ + ZookeeperConnectStringTls?: string; +} + +export namespace Provisioned { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Provisioned): any => ({ + ...obj, + }); +} + +/** + *

Details for client authentication using SASL.
                              + */ +export interface ServerlessSasl { + /** + *

Indicates whether IAM access control is enabled.
                              + */ + Iam?: Iam; +} + +export namespace ServerlessSasl { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServerlessSasl): any => ({ + ...obj, + }); +} + +/** + *

Includes all client authentication information.
                              + */ +export interface ServerlessClientAuthentication { + /** + *

Details for ClientAuthentication using SASL.
                              + */ + Sasl?: ServerlessSasl; +} + +export namespace ServerlessClientAuthentication { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServerlessClientAuthentication): any => ({ + ...obj, + }); +} + +/** + *

The configuration of the Amazon VPCs for the cluster.
                              + */ +export interface VpcConfig { + /** + *

The IDs of the subnets associated with the cluster.
                              + */ + SubnetIds: string[] | undefined; + + /** + *

The IDs of the security groups associated with the cluster.
                              + */ + SecurityGroupIds?: string[]; +} + +export namespace VpcConfig { + /** + * @internal + */ + export const filterSensitiveLog = (obj: VpcConfig): any => ({ + ...obj, + }); +} + +/** + *

Serverless cluster.
                              + */ +export interface Serverless { + /** + *

The configuration of the Amazon VPCs for the cluster.
                              + */ + VpcConfigs: VpcConfig[] | undefined; + + /** + *

Includes all client authentication information.
                              + */ + ClientAuthentication?: ServerlessClientAuthentication; +} + +export namespace Serverless { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Serverless): any => ({ + ...obj, + }); +} + +export enum ClusterState { + ACTIVE = "ACTIVE", + CREATING = "CREATING", + DELETING = "DELETING", + FAILED = "FAILED", + HEALING = "HEALING", + MAINTENANCE = "MAINTENANCE", + REBOOTING_BROKER = "REBOOTING_BROKER", + UPDATING = "UPDATING", +} + +export interface StateInfo { + Code?: string; + Message?: string; +} + +export namespace StateInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StateInfo): any => ({ + ...obj, + }); +} + +/** + *

Returns information about a cluster.
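As a reading aid (editor addition): the new Cluster shape carries a ClusterType discriminator plus optional Provisioned and Serverless blocks, of which only the one matching the type is expected to be populated. A sketch of how a consumer might branch on it, using only fields defined in this hunk:

```typescript
// Hypothetical helper built on the Cluster/ClusterType shapes introduced in this change.
import { Cluster, ClusterType } from "@aws-sdk/client-kafka";

const summarize = (cluster: Cluster): string => {
  if (cluster.ClusterType === ClusterType.PROVISIONED) {
    return `${cluster.ClusterName}: ${cluster.Provisioned?.NumberOfBrokerNodes} broker node(s)`;
  }
  if (cluster.ClusterType === ClusterType.SERVERLESS) {
    return `${cluster.ClusterName}: serverless, ${cluster.Serverless?.VpcConfigs?.length ?? 0} VPC config(s)`;
  }
  return `${cluster.ClusterName}: unknown cluster type`;
};
```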
                              + */ +export interface Cluster { + /** + *

The Amazon Resource Name (ARN) that uniquely identifies a cluster operation.
                              + */ + ActiveOperationArn?: string; + + /** + *

Cluster Type.
                              + */ + ClusterType?: ClusterType | string; + + /** + *

The Amazon Resource Name (ARN) that uniquely identifies the cluster.
                              + */ + ClusterArn?: string; + + /** + *

The name of the cluster.
                              + */ + ClusterName?: string; + + /** + *

The time when the cluster was created.
                              + */ + CreationTime?: Date; + + /** + *

The current version of the MSK cluster.
                              + */ + CurrentVersion?: string; + + /** + *

The state of the cluster. The possible states are ACTIVE, CREATING, DELETING, FAILED, HEALING, MAINTENANCE, REBOOTING_BROKER, and UPDATING.
                              + */ + State?: ClusterState | string; + + /** + *

State Info for the Amazon MSK cluster.
                              + */ + StateInfo?: StateInfo; + + /** + *

Tags attached to the cluster.
                              + */ + Tags?: { [key: string]: string }; + + /** + *

Information about the provisioned cluster.
                              + */ + Provisioned?: Provisioned; + + /** + *

Information about the serverless cluster.
                              + */ + Serverless?: Serverless; } -export namespace LoggingInfo { +export namespace Cluster { /** * @internal */ - export const filterSensitiveLog = (obj: LoggingInfo): any => ({ + export const filterSensitiveLog = (obj: Cluster): any => ({ ...obj, }); } @@ -550,31 +880,6 @@ export namespace OpenMonitoring { }); } -export enum ClusterState { - ACTIVE = "ACTIVE", - CREATING = "CREATING", - DELETING = "DELETING", - FAILED = "FAILED", - HEALING = "HEALING", - MAINTENANCE = "MAINTENANCE", - REBOOTING_BROKER = "REBOOTING_BROKER", - UPDATING = "UPDATING", -} - -export interface StateInfo { - Code?: string; - Message?: string; -} - -export namespace StateInfo { - /** - * @internal - */ - export const filterSensitiveLog = (obj: StateInfo): any => ({ - ...obj, - }); -} - /** *

Returns information about a cluster.
                              */ @@ -1485,105 +1790,110 @@ export namespace ConflictException { }); } -/** - *

Indicates whether you want to turn on or turn off the JMX Exporter.
                              - */ -export interface JmxExporterInfo { +export interface CreateClusterRequest { /** - *

Indicates whether you want to turn on or turn off the JMX Exporter.
                              + *

Information about the broker nodes in the cluster.
                              */ - EnabledInBroker: boolean | undefined; -} + BrokerNodeGroupInfo: BrokerNodeGroupInfo | undefined; -export namespace JmxExporterInfo { /** - * @internal + *

Includes all client authentication related information.
                              */ - export const filterSensitiveLog = (obj: JmxExporterInfo): any => ({ - ...obj, - }); -} + ClientAuthentication?: ClientAuthentication; -/** - *

Indicates whether you want to turn on or turn off the Node Exporter.
                              - */ -export interface NodeExporterInfo { /** - *

Indicates whether you want to turn on or turn off the Node Exporter.
                              + *

The name of the cluster.
                              */ - EnabledInBroker: boolean | undefined; -} + ClusterName: string | undefined; -export namespace NodeExporterInfo { /** - * @internal + *

Represents the configuration that you want MSK to use for the brokers in a cluster.
                              */ - export const filterSensitiveLog = (obj: NodeExporterInfo): any => ({ - ...obj, - }); -} + ConfigurationInfo?: ConfigurationInfo; -/** - *

Prometheus settings.
                              - */ -export interface PrometheusInfo { /** - *

Indicates whether you want to turn on or turn off the JMX Exporter.
                              + *

Includes all encryption-related information.
                              */ - JmxExporter?: JmxExporterInfo; + EncryptionInfo?: EncryptionInfo; /** - *

Indicates whether you want to turn on or turn off the Node Exporter.
                              + *

Specifies the level of monitoring for the MSK cluster. The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.
                              */ - NodeExporter?: NodeExporterInfo; + EnhancedMonitoring?: EnhancedMonitoring | string; + + /** + *

The settings for open monitoring.
                              + */ + OpenMonitoring?: OpenMonitoringInfo; + + /** + *

The version of Apache Kafka.
                              + */ + KafkaVersion: string | undefined; + + LoggingInfo?: LoggingInfo; + /** + *

The number of broker nodes in the cluster.
                              + */ + NumberOfBrokerNodes: number | undefined; + + /** + *

Create tags when creating the cluster.
                              + */ + Tags?: { [key: string]: string }; } -export namespace PrometheusInfo { +export namespace CreateClusterRequest { /** * @internal */ - export const filterSensitiveLog = (obj: PrometheusInfo): any => ({ + export const filterSensitiveLog = (obj: CreateClusterRequest): any => ({ ...obj, }); } -/** - *

JMX and Node monitoring for the MSK cluster.
                              - */ -export interface OpenMonitoringInfo { +export interface CreateClusterResponse { /** - *

Prometheus settings.
                              + *

The Amazon Resource Name (ARN) of the cluster.
                              */ - Prometheus: PrometheusInfo | undefined; + ClusterArn?: string; + + /** + *

The name of the MSK cluster.
                              + */ + ClusterName?: string; + + /** + *

The state of the cluster. The possible states are ACTIVE, CREATING, DELETING, FAILED, HEALING, MAINTENANCE, REBOOTING_BROKER, and UPDATING.
                              + */ + State?: ClusterState | string; } -export namespace OpenMonitoringInfo { +export namespace CreateClusterResponse { /** * @internal */ - export const filterSensitiveLog = (obj: OpenMonitoringInfo): any => ({ + export const filterSensitiveLog = (obj: CreateClusterResponse): any => ({ ...obj, }); } -export interface CreateClusterRequest { +/** + *

Provisioned cluster request.
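For contrast with the serverless sketch earlier in this change (editor addition): a provisioned CreateClusterV2 request. The BrokerNodeGroupInfo fields (InstanceType, ClientSubnets) come from the pre-existing MSK model that is not part of this hunk, and all values are placeholders.

```typescript
// Hypothetical usage sketch for the provisioned variant of createClusterV2.
// Assumes an async/ESM context.
import { Kafka } from "@aws-sdk/client-kafka";

const kafka = new Kafka({ region: "us-east-1" });
const response = await kafka.createClusterV2({
  ClusterName: "example-provisioned-cluster",
  Provisioned: {
    KafkaVersion: "2.8.1",
    NumberOfBrokerNodes: 3,
    BrokerNodeGroupInfo: {
      // Assumed fields from the pre-existing BrokerNodeGroupInfo model.
      InstanceType: "kafka.m5.large",
      ClientSubnets: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210", "subnet-0abcdef0123456789"],
    },
  },
});
console.log(response.ClusterArn, response.State);
```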
                              + */ +export interface ProvisionedRequest { /** - *

Information about the broker nodes in the cluster.
                              + *

Information about the brokers.
                              */ BrokerNodeGroupInfo: BrokerNodeGroupInfo | undefined; /** - *

Includes all client authentication related information.
                              + *

Includes all client authentication information.
                              */ ClientAuthentication?: ClientAuthentication; /** - *

The name of the cluster.
                              - */ - ClusterName: string | undefined; - - /** - *

Represents the configuration that you want MSK to use for the brokers in a cluster.
                              + *

Represents the configuration that you want Amazon MSK to use for the brokers in a cluster.
                              */ ConfigurationInfo?: ConfigurationInfo; @@ -1603,32 +1913,86 @@ export interface CreateClusterRequest { OpenMonitoring?: OpenMonitoringInfo; /** - *

The version of Apache Kafka.
                              + *

The Apache Kafka version that you want for the cluster.
                              */ KafkaVersion: string | undefined; + /** + *

Log delivery information for the cluster.
                              + */ LoggingInfo?: LoggingInfo; + /** *

The number of broker nodes in the cluster.
                              */ NumberOfBrokerNodes: number | undefined; +} +export namespace ProvisionedRequest { /** - *

Create tags when creating the cluster.
                              + * @internal + */ + export const filterSensitiveLog = (obj: ProvisionedRequest): any => ({ + ...obj, + }); +} + +/** + *

Serverless cluster request.
                              + */ +export interface ServerlessRequest { + /** + *

The configuration of the Amazon VPCs for the cluster.
                              + */ + VpcConfigs: VpcConfig[] | undefined; + + /** + *

Includes all client authentication information.
                              + */ + ClientAuthentication?: ServerlessClientAuthentication; +} + +export namespace ServerlessRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServerlessRequest): any => ({ + ...obj, + }); +} + +export interface CreateClusterV2Request { + /** + *

The name of the cluster.
                              + */ + ClusterName: string | undefined; + + /** + *

A map of tags that you want the cluster to have.
                              */ Tags?: { [key: string]: string }; + + /** + *

Information about the provisioned cluster.
                              + */ + Provisioned?: ProvisionedRequest; + + /** + *

Information about the serverless cluster.
                              + */ + Serverless?: ServerlessRequest; } -export namespace CreateClusterRequest { +export namespace CreateClusterV2Request { /** * @internal */ - export const filterSensitiveLog = (obj: CreateClusterRequest): any => ({ + export const filterSensitiveLog = (obj: CreateClusterV2Request): any => ({ ...obj, }); } -export interface CreateClusterResponse { +export interface CreateClusterV2Response { /** *

The Amazon Resource Name (ARN) of the cluster.
                              */ @@ -1643,13 +2007,18 @@ export interface CreateClusterResponse { *

The state of the cluster. The possible states are ACTIVE, CREATING, DELETING, FAILED, HEALING, MAINTENANCE, REBOOTING_BROKER, and UPDATING.
                              */ State?: ClusterState | string; + + /** + *

The type of the cluster. The possible values are PROVISIONED or SERVERLESS.
                              + */ + ClusterType?: ClusterType | string; } -export namespace CreateClusterResponse { +export namespace CreateClusterV2Response { /** * @internal */ - export const filterSensitiveLog = (obj: CreateClusterResponse): any => ({ + export const filterSensitiveLog = (obj: CreateClusterV2Response): any => ({ ...obj, }); } @@ -1865,6 +2234,38 @@ export namespace DescribeClusterOperationResponse { }); } +export interface DescribeClusterV2Request { + /** + *

The Amazon Resource Name (ARN) that uniquely identifies the cluster.
                              + */ + ClusterArn: string | undefined; +} + +export namespace DescribeClusterV2Request { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeClusterV2Request): any => ({ + ...obj, + }); +} + +export interface DescribeClusterV2Response { + /** + *

The cluster information.
                              + */ + ClusterInfo?: Cluster; +} + +export namespace DescribeClusterV2Response { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeClusterV2Response): any => ({ + ...obj, + }); +} + export interface DescribeConfigurationRequest { /** *

The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.
                              @@ -2176,6 +2577,60 @@ export namespace ListClustersResponse { }); } +export interface ListClustersV2Request { + /** + *

Specify a prefix of the names of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.
                              + */ + ClusterNameFilter?: string; + + /** + *

Specify either PROVISIONED or SERVERLESS.
                              + */ + ClusterTypeFilter?: string; + + /** + *

The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.
                              + */ + MaxResults?: number; + + /** + *

The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. To get the next batch, provide this token in your next request.
                              + */ + NextToken?: string; +} + +export namespace ListClustersV2Request { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListClustersV2Request): any => ({ + ...obj, + }); +} + +export interface ListClustersV2Response { + /** + *

Information on each of the MSK clusters in the response.
                              + */ + ClusterInfoList?: Cluster[]; + + /** + *

The paginated results marker. When the result of a ListClusters operation is truncated, the call returns NextToken in the response. To get another batch of clusters, provide this token in your next request.
                              + */ + NextToken?: string; +} + +export namespace ListClustersV2Response { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListClustersV2Response): any => ({ + ...obj, + }); +} + export interface ListConfigurationRevisionsRequest { /** *

The Amazon Resource Name (ARN) that uniquely identifies an MSK configuration and all of its revisions.
                              diff --git a/clients/client-kafka/src/pagination/ListClustersV2Paginator.ts b/clients/client-kafka/src/pagination/ListClustersV2Paginator.ts new file mode 100644 index 000000000000..a7d3711b5d21 --- /dev/null +++ b/clients/client-kafka/src/pagination/ListClustersV2Paginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListClustersV2Command, + ListClustersV2CommandInput, + ListClustersV2CommandOutput, +} from "../commands/ListClustersV2Command"; +import { Kafka } from "../Kafka"; +import { KafkaClient } from "../KafkaClient"; +import { KafkaPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: KafkaClient, + input: ListClustersV2CommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListClustersV2Command(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Kafka, + input: ListClustersV2CommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listClustersV2(input, ...args); +}; +export async function* paginateListClustersV2( + config: KafkaPaginationConfiguration, + input: ListClustersV2CommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListClustersV2CommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof Kafka) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof KafkaClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Kafka | KafkaClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-kafka/src/pagination/index.ts b/clients/client-kafka/src/pagination/index.ts index 146d183d3e3a..aff06ed615c7 100644 --- a/clients/client-kafka/src/pagination/index.ts +++ b/clients/client-kafka/src/pagination/index.ts @@ -1,6 +1,7 @@ export * from "./Interfaces"; export * from "./ListClusterOperationsPaginator"; export * from "./ListClustersPaginator"; +export * from "./ListClustersV2Paginator"; export * from "./ListConfigurationRevisionsPaginator"; export * from "./ListConfigurationsPaginator"; export * from "./ListKafkaVersionsPaginator"; diff --git a/clients/client-kafka/src/protocols/Aws_restJson1.ts b/clients/client-kafka/src/protocols/Aws_restJson1.ts index 78b662b0c981..60a902522bb2 100644 --- a/clients/client-kafka/src/protocols/Aws_restJson1.ts +++ b/clients/client-kafka/src/protocols/Aws_restJson1.ts @@ -27,6 +27,7 @@ import { BatchDisassociateScramSecretCommandOutput, } from "../commands/BatchDisassociateScramSecretCommand"; import { CreateClusterCommandInput, CreateClusterCommandOutput } from "../commands/CreateClusterCommand"; +import { CreateClusterV2CommandInput, CreateClusterV2CommandOutput } from "../commands/CreateClusterV2Command"; import { CreateConfigurationCommandInput, CreateConfigurationCommandOutput, @@ -41,6 +42,7 @@ import { DescribeClusterOperationCommandInput, DescribeClusterOperationCommandOutput, } from "../commands/DescribeClusterOperationCommand"; +import { DescribeClusterV2CommandInput, DescribeClusterV2CommandOutput } from 
"../commands/DescribeClusterV2Command"; import { DescribeConfigurationCommandInput, DescribeConfigurationCommandOutput, @@ -62,6 +64,7 @@ import { ListClusterOperationsCommandOutput, } from "../commands/ListClusterOperationsCommand"; import { ListClustersCommandInput, ListClustersCommandOutput } from "../commands/ListClustersCommand"; +import { ListClustersV2CommandInput, ListClustersV2CommandOutput } from "../commands/ListClustersV2Command"; import { ListConfigurationRevisionsCommandInput, ListConfigurationRevisionsCommandOutput, @@ -107,6 +110,7 @@ import { BrokerSoftwareInfo, ClientAuthentication, CloudWatchLogs, + Cluster, ClusterInfo, ClusterOperationInfo, ClusterOperationStep, @@ -139,10 +143,16 @@ import { OpenMonitoringInfo, Prometheus, PrometheusInfo, + Provisioned, + ProvisionedRequest, PublicAccess, S3, Sasl, Scram, + Serverless, + ServerlessClientAuthentication, + ServerlessRequest, + ServerlessSasl, ServiceUnavailableException, StateInfo, StorageInfo, @@ -151,6 +161,7 @@ import { Unauthenticated, UnauthorizedException, UnprocessedScramSecret, + VpcConfig, ZookeeperNodeInfo, } from "../models/models_0"; @@ -281,6 +292,38 @@ export const serializeAws_restJson1CreateClusterCommand = async ( }); }; +export const serializeAws_restJson1CreateClusterV2Command = async ( + input: CreateClusterV2CommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/api/v2/clusters"; + let body: any; + body = JSON.stringify({ + ...(input.ClusterName !== undefined && input.ClusterName !== null && { clusterName: input.ClusterName }), + ...(input.Provisioned !== undefined && + input.Provisioned !== null && { + provisioned: serializeAws_restJson1ProvisionedRequest(input.Provisioned, context), + }), + ...(input.Serverless !== undefined && + input.Serverless !== null && { serverless: serializeAws_restJson1ServerlessRequest(input.Serverless, context) }), + ...(input.Tags !== undefined && + input.Tags !== null && { tags: serializeAws_restJson1__mapOf__string(input.Tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1CreateConfigurationCommand = async ( input: CreateConfigurationCommandInput, context: __SerdeContext @@ -431,6 +474,35 @@ export const serializeAws_restJson1DescribeClusterOperationCommand = async ( }); }; +export const serializeAws_restJson1DescribeClusterV2Command = async ( + input: DescribeClusterV2CommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/api/v2/clusters/{ClusterArn}"; + if (input.ClusterArn !== undefined) { + const labelValue: string = input.ClusterArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ClusterArn."); + } + resolvedPath = resolvedPath.replace("{ClusterArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ClusterArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1DescribeConfigurationCommand = async ( input: DescribeConfigurationCommandInput, context: __SerdeContext @@ -611,6 +683,32 @@ export const serializeAws_restJson1ListClustersCommand = async ( }); }; +export const serializeAws_restJson1ListClustersV2Command = async ( + input: ListClustersV2CommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/api/v2/clusters"; + const query: any = { + ...(input.ClusterNameFilter !== undefined && { clusterNameFilter: input.ClusterNameFilter }), + ...(input.ClusterTypeFilter !== undefined && { clusterTypeFilter: input.ClusterTypeFilter }), + ...(input.MaxResults !== undefined && { maxResults: input.MaxResults.toString() }), + ...(input.NextToken !== undefined && { nextToken: input.NextToken }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1ListConfigurationRevisionsCommand = async ( input: ListConfigurationRevisionsCommandInput, context: __SerdeContext @@ -1577,6 +1675,121 @@ const deserializeAws_restJson1CreateClusterCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1CreateClusterV2Command = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateClusterV2CommandError(output, context); + } + const contents: CreateClusterV2CommandOutput = { + $metadata: deserializeMetadata(output), + ClusterArn: undefined, + ClusterName: undefined, + ClusterType: undefined, + State: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.clusterArn !== undefined && data.clusterArn !== null) { + contents.ClusterArn = __expectString(data.clusterArn); + } + if (data.clusterName !== undefined && data.clusterName !== null) { + contents.ClusterName = __expectString(data.clusterName); + } + if (data.clusterType !== undefined && data.clusterType !== null) { + contents.ClusterType = __expectString(data.clusterType); + } + if (data.state !== undefined && data.state !== null) { + contents.State = __expectString(data.state); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateClusterV2CommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode 
= "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequestException": + case "com.amazonaws.kafka#BadRequestException": + response = { + ...(await deserializeAws_restJson1BadRequestExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.kafka#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ForbiddenException": + case "com.amazonaws.kafka#ForbiddenException": + response = { + ...(await deserializeAws_restJson1ForbiddenExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerErrorException": + case "com.amazonaws.kafka#InternalServerErrorException": + response = { + ...(await deserializeAws_restJson1InternalServerErrorExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceUnavailableException": + case "com.amazonaws.kafka#ServiceUnavailableException": + response = { + ...(await deserializeAws_restJson1ServiceUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyRequestsException": + case "com.amazonaws.kafka#TooManyRequestsException": + response = { + ...(await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnauthorizedException": + case "com.amazonaws.kafka#UnauthorizedException": + response = { + ...(await deserializeAws_restJson1UnauthorizedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1CreateConfigurationCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2036,6 +2249,93 @@ const deserializeAws_restJson1DescribeClusterOperationCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1DescribeClusterV2Command = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DescribeClusterV2CommandError(output, context); + } + const contents: DescribeClusterV2CommandOutput = { + $metadata: deserializeMetadata(output), + ClusterInfo: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.clusterInfo !== undefined && data.clusterInfo !== null) { + contents.ClusterInfo = deserializeAws_restJson1Cluster(data.clusterInfo, context); + } + return Promise.resolve(contents); +}; + +const 
deserializeAws_restJson1DescribeClusterV2CommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "BadRequestException": + case "com.amazonaws.kafka#BadRequestException": + response = { + ...(await deserializeAws_restJson1BadRequestExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ForbiddenException": + case "com.amazonaws.kafka#ForbiddenException": + response = { + ...(await deserializeAws_restJson1ForbiddenExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerErrorException": + case "com.amazonaws.kafka#InternalServerErrorException": + response = { + ...(await deserializeAws_restJson1InternalServerErrorExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "NotFoundException": + case "com.amazonaws.kafka#NotFoundException": + response = { + ...(await deserializeAws_restJson1NotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnauthorizedException": + case "com.amazonaws.kafka#UnauthorizedException": + response = { + ...(await deserializeAws_restJson1UnauthorizedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1DescribeConfigurationCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2389,19 +2689,129 @@ export const deserializeAws_restJson1GetCompatibleKafkaVersionsCommand = async ( CompatibleKafkaVersions: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.compatibleKafkaVersions !== undefined && data.compatibleKafkaVersions !== null) { - contents.CompatibleKafkaVersions = deserializeAws_restJson1__listOfCompatibleKafkaVersion( - data.compatibleKafkaVersions, + if (data.compatibleKafkaVersions !== undefined && data.compatibleKafkaVersions !== null) { + contents.CompatibleKafkaVersions = deserializeAws_restJson1__listOfCompatibleKafkaVersion( + data.compatibleKafkaVersions, + context + ); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetCompatibleKafkaVersionsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, 
parsedOutput.body); + switch (errorCode) { + case "BadRequestException": + case "com.amazonaws.kafka#BadRequestException": + response = { + ...(await deserializeAws_restJson1BadRequestExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ForbiddenException": + case "com.amazonaws.kafka#ForbiddenException": + response = { + ...(await deserializeAws_restJson1ForbiddenExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerErrorException": + case "com.amazonaws.kafka#InternalServerErrorException": + response = { + ...(await deserializeAws_restJson1InternalServerErrorExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "NotFoundException": + case "com.amazonaws.kafka#NotFoundException": + response = { + ...(await deserializeAws_restJson1NotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceUnavailableException": + case "com.amazonaws.kafka#ServiceUnavailableException": + response = { + ...(await deserializeAws_restJson1ServiceUnavailableExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyRequestsException": + case "com.amazonaws.kafka#TooManyRequestsException": + response = { + ...(await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "UnauthorizedException": + case "com.amazonaws.kafka#UnauthorizedException": + response = { + ...(await deserializeAws_restJson1UnauthorizedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListClusterOperationsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListClusterOperationsCommandError(output, context); + } + const contents: ListClusterOperationsCommandOutput = { + $metadata: deserializeMetadata(output), + ClusterOperationInfoList: undefined, + NextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.clusterOperationInfoList !== undefined && data.clusterOperationInfoList !== null) { + contents.ClusterOperationInfoList = deserializeAws_restJson1__listOfClusterOperationInfo( + data.clusterOperationInfoList, context ); } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.NextToken = __expectString(data.nextToken); + } return Promise.resolve(contents); }; -const deserializeAws_restJson1GetCompatibleKafkaVersionsCommandError = async ( +const 
deserializeAws_restJson1ListClusterOperationsCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2434,30 +2844,6 @@ const deserializeAws_restJson1GetCompatibleKafkaVersionsCommandError = async ( $metadata: deserializeMetadata(output), }; break; - case "NotFoundException": - case "com.amazonaws.kafka#NotFoundException": - response = { - ...(await deserializeAws_restJson1NotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ServiceUnavailableException": - case "com.amazonaws.kafka#ServiceUnavailableException": - response = { - ...(await deserializeAws_restJson1ServiceUnavailableExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "TooManyRequestsException": - case "com.amazonaws.kafka#TooManyRequestsException": - response = { - ...(await deserializeAws_restJson1TooManyRequestsExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; case "UnauthorizedException": case "com.amazonaws.kafka#UnauthorizedException": response = { @@ -2483,24 +2869,21 @@ const deserializeAws_restJson1GetCompatibleKafkaVersionsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_restJson1ListClusterOperationsCommand = async ( +export const deserializeAws_restJson1ListClustersCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1ListClusterOperationsCommandError(output, context); + return deserializeAws_restJson1ListClustersCommandError(output, context); } - const contents: ListClusterOperationsCommandOutput = { + const contents: ListClustersCommandOutput = { $metadata: deserializeMetadata(output), - ClusterOperationInfoList: undefined, + ClusterInfoList: undefined, NextToken: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.clusterOperationInfoList !== undefined && data.clusterOperationInfoList !== null) { - contents.ClusterOperationInfoList = deserializeAws_restJson1__listOfClusterOperationInfo( - data.clusterOperationInfoList, - context - ); + if (data.clusterInfoList !== undefined && data.clusterInfoList !== null) { + contents.ClusterInfoList = deserializeAws_restJson1__listOfClusterInfo(data.clusterInfoList, context); } if (data.nextToken !== undefined && data.nextToken !== null) { contents.NextToken = __expectString(data.nextToken); @@ -2508,10 +2891,10 @@ export const deserializeAws_restJson1ListClusterOperationsCommand = async ( return Promise.resolve(contents); }; -const deserializeAws_restJson1ListClusterOperationsCommandError = async ( +const deserializeAws_restJson1ListClustersCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2569,21 +2952,21 @@ const deserializeAws_restJson1ListClusterOperationsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_restJson1ListClustersCommand = async ( +export const deserializeAws_restJson1ListClustersV2Command = async 
( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1ListClustersCommandError(output, context); + return deserializeAws_restJson1ListClustersV2CommandError(output, context); } - const contents: ListClustersCommandOutput = { + const contents: ListClustersV2CommandOutput = { $metadata: deserializeMetadata(output), ClusterInfoList: undefined, NextToken: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); if (data.clusterInfoList !== undefined && data.clusterInfoList !== null) { - contents.ClusterInfoList = deserializeAws_restJson1__listOfClusterInfo(data.clusterInfoList, context); + contents.ClusterInfoList = deserializeAws_restJson1__listOfCluster(data.clusterInfoList, context); } if (data.nextToken !== undefined && data.nextToken !== null) { contents.NextToken = __expectString(data.nextToken); @@ -2591,10 +2974,10 @@ export const deserializeAws_restJson1ListClustersCommand = async ( return Promise.resolve(contents); }; -const deserializeAws_restJson1ListClustersCommandError = async ( +const deserializeAws_restJson1ListClustersV2CommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -4511,6 +4894,17 @@ const serializeAws_restJson1__listOfBrokerEBSVolumeInfo = ( }); }; +const serializeAws_restJson1__listOfVpcConfig = (input: VpcConfig[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1VpcConfig(entry, context); + }); +}; + const serializeAws_restJson1__mapOf__string = (input: { [key: string]: string }, context: __SerdeContext): any => { return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { if (value === null) { @@ -4683,6 +5077,38 @@ const serializeAws_restJson1PrometheusInfo = (input: PrometheusInfo, context: __ }; }; +const serializeAws_restJson1ProvisionedRequest = (input: ProvisionedRequest, context: __SerdeContext): any => { + return { + ...(input.BrokerNodeGroupInfo !== undefined && + input.BrokerNodeGroupInfo !== null && { + brokerNodeGroupInfo: serializeAws_restJson1BrokerNodeGroupInfo(input.BrokerNodeGroupInfo, context), + }), + ...(input.ClientAuthentication !== undefined && + input.ClientAuthentication !== null && { + clientAuthentication: serializeAws_restJson1ClientAuthentication(input.ClientAuthentication, context), + }), + ...(input.ConfigurationInfo !== undefined && + input.ConfigurationInfo !== null && { + configurationInfo: serializeAws_restJson1ConfigurationInfo(input.ConfigurationInfo, context), + }), + ...(input.EncryptionInfo !== undefined && + input.EncryptionInfo !== null && { + encryptionInfo: serializeAws_restJson1EncryptionInfo(input.EncryptionInfo, context), + }), + ...(input.EnhancedMonitoring !== undefined && + input.EnhancedMonitoring !== null && { enhancedMonitoring: input.EnhancedMonitoring }), + ...(input.KafkaVersion !== undefined && input.KafkaVersion !== null && { kafkaVersion: input.KafkaVersion }), + ...(input.LoggingInfo !== undefined && + input.LoggingInfo !== null && { loggingInfo: serializeAws_restJson1LoggingInfo(input.LoggingInfo, context) }), + ...(input.NumberOfBrokerNodes !== undefined && + input.NumberOfBrokerNodes !== null && { 
numberOfBrokerNodes: input.NumberOfBrokerNodes }), + ...(input.OpenMonitoring !== undefined && + input.OpenMonitoring !== null && { + openMonitoring: serializeAws_restJson1OpenMonitoringInfo(input.OpenMonitoring, context), + }), + }; +}; + const serializeAws_restJson1PublicAccess = (input: PublicAccess, context: __SerdeContext): any => { return { ...(input.Type !== undefined && input.Type !== null && { type: input.Type }), @@ -4711,6 +5137,33 @@ const serializeAws_restJson1Scram = (input: Scram, context: __SerdeContext): any }; }; +const serializeAws_restJson1ServerlessClientAuthentication = ( + input: ServerlessClientAuthentication, + context: __SerdeContext +): any => { + return { + ...(input.Sasl !== undefined && + input.Sasl !== null && { sasl: serializeAws_restJson1ServerlessSasl(input.Sasl, context) }), + }; +}; + +const serializeAws_restJson1ServerlessRequest = (input: ServerlessRequest, context: __SerdeContext): any => { + return { + ...(input.ClientAuthentication !== undefined && + input.ClientAuthentication !== null && { + clientAuthentication: serializeAws_restJson1ServerlessClientAuthentication(input.ClientAuthentication, context), + }), + ...(input.VpcConfigs !== undefined && + input.VpcConfigs !== null && { vpcConfigs: serializeAws_restJson1__listOfVpcConfig(input.VpcConfigs, context) }), + }; +}; + +const serializeAws_restJson1ServerlessSasl = (input: ServerlessSasl, context: __SerdeContext): any => { + return { + ...(input.Iam !== undefined && input.Iam !== null && { iam: serializeAws_restJson1Iam(input.Iam, context) }), + }; +}; + const serializeAws_restJson1StorageInfo = (input: StorageInfo, context: __SerdeContext): any => { return { ...(input.EbsStorageInfo !== undefined && @@ -4736,6 +5189,17 @@ const serializeAws_restJson1Unauthenticated = (input: Unauthenticated, context: }; }; +const serializeAws_restJson1VpcConfig = (input: VpcConfig, context: __SerdeContext): any => { + return { + ...(input.SecurityGroupIds !== undefined && + input.SecurityGroupIds !== null && { + securityGroupIds: serializeAws_restJson1__listOf__string(input.SecurityGroupIds, context), + }), + ...(input.SubnetIds !== undefined && + input.SubnetIds !== null && { subnetIds: serializeAws_restJson1__listOf__string(input.SubnetIds, context) }), + }; +}; + const deserializeAws_restJson1__listOf__string = (output: any, context: __SerdeContext): string[] => { return (output || []) .filter((e: any) => e != null) @@ -4761,6 +5225,17 @@ const deserializeAws_restJson1__listOfBrokerEBSVolumeInfo = ( }); }; +const deserializeAws_restJson1__listOfCluster = (output: any, context: __SerdeContext): Cluster[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Cluster(entry, context); + }); +}; + const deserializeAws_restJson1__listOfClusterInfo = (output: any, context: __SerdeContext): ClusterInfo[] => { return (output || []) .filter((e: any) => e != null) @@ -4875,6 +5350,17 @@ const deserializeAws_restJson1__listOfUnprocessedScramSecret = ( }); }; +const deserializeAws_restJson1__listOfVpcConfig = (output: any, context: __SerdeContext): VpcConfig[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1VpcConfig(entry, context); + }); +}; + const deserializeAws_restJson1__mapOf__string = (output: any, context: __SerdeContext): { [key: string]: string } => { return 
Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { if (value === null) { @@ -4977,6 +5463,37 @@ const deserializeAws_restJson1CloudWatchLogs = (output: any, context: __SerdeCon } as any; }; +const deserializeAws_restJson1Cluster = (output: any, context: __SerdeContext): Cluster => { + return { + ActiveOperationArn: __expectString(output.activeOperationArn), + ClusterArn: __expectString(output.clusterArn), + ClusterName: __expectString(output.clusterName), + ClusterType: __expectString(output.clusterType), + CreationTime: + output.creationTime !== undefined && output.creationTime !== null + ? __expectNonNull(__parseRfc3339DateTime(output.creationTime)) + : undefined, + CurrentVersion: __expectString(output.currentVersion), + Provisioned: + output.provisioned !== undefined && output.provisioned !== null + ? deserializeAws_restJson1Provisioned(output.provisioned, context) + : undefined, + Serverless: + output.serverless !== undefined && output.serverless !== null + ? deserializeAws_restJson1Serverless(output.serverless, context) + : undefined, + State: __expectString(output.state), + StateInfo: + output.stateInfo !== undefined && output.stateInfo !== null + ? deserializeAws_restJson1StateInfo(output.stateInfo, context) + : undefined, + Tags: + output.tags !== undefined && output.tags !== null + ? deserializeAws_restJson1__mapOf__string(output.tags, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1ClusterInfo = (output: any, context: __SerdeContext): ClusterInfo => { return { ActiveOperationArn: __expectString(output.activeOperationArn), @@ -5199,6 +5716,12 @@ const deserializeAws_restJson1JmxExporter = (output: any, context: __SerdeContex } as any; }; +const deserializeAws_restJson1JmxExporterInfo = (output: any, context: __SerdeContext): JmxExporterInfo => { + return { + EnabledInBroker: __expectBoolean(output.enabledInBroker), + } as any; +}; + const deserializeAws_restJson1KafkaVersion = (output: any, context: __SerdeContext): KafkaVersion => { return { Status: __expectString(output.status), @@ -5258,6 +5781,12 @@ const deserializeAws_restJson1NodeExporter = (output: any, context: __SerdeConte } as any; }; +const deserializeAws_restJson1NodeExporterInfo = (output: any, context: __SerdeContext): NodeExporterInfo => { + return { + EnabledInBroker: __expectBoolean(output.enabledInBroker), + } as any; +}; + const deserializeAws_restJson1NodeInfo = (output: any, context: __SerdeContext): NodeInfo => { return { AddedToClusterTime: __expectString(output.addedToClusterTime), @@ -5284,6 +5813,15 @@ const deserializeAws_restJson1OpenMonitoring = (output: any, context: __SerdeCon } as any; }; +const deserializeAws_restJson1OpenMonitoringInfo = (output: any, context: __SerdeContext): OpenMonitoringInfo => { + return { + Prometheus: + output.prometheus !== undefined && output.prometheus !== null + ? deserializeAws_restJson1PrometheusInfo(output.prometheus, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1Prometheus = (output: any, context: __SerdeContext): Prometheus => { return { JmxExporter: @@ -5297,6 +5835,52 @@ const deserializeAws_restJson1Prometheus = (output: any, context: __SerdeContext } as any; }; +const deserializeAws_restJson1PrometheusInfo = (output: any, context: __SerdeContext): PrometheusInfo => { + return { + JmxExporter: + output.jmxExporter !== undefined && output.jmxExporter !== null + ? 
deserializeAws_restJson1JmxExporterInfo(output.jmxExporter, context) + : undefined, + NodeExporter: + output.nodeExporter !== undefined && output.nodeExporter !== null + ? deserializeAws_restJson1NodeExporterInfo(output.nodeExporter, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1Provisioned = (output: any, context: __SerdeContext): Provisioned => { + return { + BrokerNodeGroupInfo: + output.brokerNodeGroupInfo !== undefined && output.brokerNodeGroupInfo !== null + ? deserializeAws_restJson1BrokerNodeGroupInfo(output.brokerNodeGroupInfo, context) + : undefined, + ClientAuthentication: + output.clientAuthentication !== undefined && output.clientAuthentication !== null + ? deserializeAws_restJson1ClientAuthentication(output.clientAuthentication, context) + : undefined, + CurrentBrokerSoftwareInfo: + output.currentBrokerSoftwareInfo !== undefined && output.currentBrokerSoftwareInfo !== null + ? deserializeAws_restJson1BrokerSoftwareInfo(output.currentBrokerSoftwareInfo, context) + : undefined, + EncryptionInfo: + output.encryptionInfo !== undefined && output.encryptionInfo !== null + ? deserializeAws_restJson1EncryptionInfo(output.encryptionInfo, context) + : undefined, + EnhancedMonitoring: __expectString(output.enhancedMonitoring), + LoggingInfo: + output.loggingInfo !== undefined && output.loggingInfo !== null + ? deserializeAws_restJson1LoggingInfo(output.loggingInfo, context) + : undefined, + NumberOfBrokerNodes: __expectInt32(output.numberOfBrokerNodes), + OpenMonitoring: + output.openMonitoring !== undefined && output.openMonitoring !== null + ? deserializeAws_restJson1OpenMonitoringInfo(output.openMonitoring, context) + : undefined, + ZookeeperConnectString: __expectString(output.zookeeperConnectString), + ZookeeperConnectStringTls: __expectString(output.zookeeperConnectStringTls), + } as any; +}; + const deserializeAws_restJson1PublicAccess = (output: any, context: __SerdeContext): PublicAccess => { return { Type: __expectString(output.type), @@ -5327,6 +5911,37 @@ const deserializeAws_restJson1Scram = (output: any, context: __SerdeContext): Sc } as any; }; +const deserializeAws_restJson1Serverless = (output: any, context: __SerdeContext): Serverless => { + return { + ClientAuthentication: + output.clientAuthentication !== undefined && output.clientAuthentication !== null + ? deserializeAws_restJson1ServerlessClientAuthentication(output.clientAuthentication, context) + : undefined, + VpcConfigs: + output.vpcConfigs !== undefined && output.vpcConfigs !== null + ? deserializeAws_restJson1__listOfVpcConfig(output.vpcConfigs, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ServerlessClientAuthentication = ( + output: any, + context: __SerdeContext +): ServerlessClientAuthentication => { + return { + Sasl: + output.sasl !== undefined && output.sasl !== null + ? deserializeAws_restJson1ServerlessSasl(output.sasl, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ServerlessSasl = (output: any, context: __SerdeContext): ServerlessSasl => { + return { + Iam: output.iam !== undefined && output.iam !== null ? 
deserializeAws_restJson1Iam(output.iam, context) : undefined, + } as any; +}; + const deserializeAws_restJson1StateInfo = (output: any, context: __SerdeContext): StateInfo => { return { Code: __expectString(output.code), @@ -5370,6 +5985,19 @@ const deserializeAws_restJson1UnprocessedScramSecret = ( } as any; }; +const deserializeAws_restJson1VpcConfig = (output: any, context: __SerdeContext): VpcConfig => { + return { + SecurityGroupIds: + output.securityGroupIds !== undefined && output.securityGroupIds !== null + ? deserializeAws_restJson1__listOf__string(output.securityGroupIds, context) + : undefined, + SubnetIds: + output.subnetIds !== undefined && output.subnetIds !== null + ? deserializeAws_restJson1__listOf__string(output.subnetIds, context) + : undefined, + } as any; +}; + const deserializeAws_restJson1ZookeeperNodeInfo = (output: any, context: __SerdeContext): ZookeeperNodeInfo => { return { AttachedENIId: __expectString(output.attachedENIId), diff --git a/clients/client-kinesis/README.md b/clients/client-kinesis/README.md index b34ee9e8e2c6..4331a20638db 100644 --- a/clients/client-kinesis/README.md +++ b/clients/client-kinesis/README.md @@ -9,8 +9,8 @@ AWS SDK for JavaScript Kinesis Client for Node.js, Browser and React Native. Amazon Kinesis Data Streams Service API Reference -
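The new MSK V2 surface added above (the ListClustersV2 paginator plus the CreateClusterV2, DescribeClusterV2, and ListClustersV2 serializers and deserializers) is easiest to exercise through the paginator. A minimal sketch, assuming paginateListClustersV2 is re-exported from the package root like the existing Kafka paginators; the region value is a placeholder:

```ts
import { KafkaClient, paginateListClustersV2 } from "@aws-sdk/client-kafka";

const client = new KafkaClient({ region: "us-east-1" }); // placeholder region

async function listAllClusters(): Promise<void> {
  // The paginator follows NextToken automatically, mirroring the generated loop above.
  for await (const page of paginateListClustersV2({ client, pageSize: 20 }, {})) {
    for (const cluster of page.ClusterInfoList ?? []) {
      console.log(cluster.ClusterName, cluster.ClusterType, cluster.State);
    }
  }
}
```

Passing the ClusterTypeFilter field shown in the ListClustersV2 serializer above should narrow the listing to one cluster type, for example the new serverless clusters handled by the Serverless* shapes.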

                              Amazon Kinesis Data Streams is a managed service that scales elastically for -real-time processing of streaming big data.

                              +

                              Amazon Kinesis Data Streams is a managed service that scales elastically for real-time +processing of streaming big data.

                              ## Installing diff --git a/clients/client-kinesis/src/Kinesis.ts b/clients/client-kinesis/src/Kinesis.ts index 557b1bc0ee1b..0b93f3e8c9af 100644 --- a/clients/client-kinesis/src/Kinesis.ts +++ b/clients/client-kinesis/src/Kinesis.ts @@ -112,19 +112,22 @@ import { UpdateShardCountCommandInput, UpdateShardCountCommandOutput, } from "./commands/UpdateShardCountCommand"; +import { + UpdateStreamModeCommand, + UpdateStreamModeCommandInput, + UpdateStreamModeCommandOutput, +} from "./commands/UpdateStreamModeCommand"; import { KinesisClient } from "./KinesisClient"; /** * Amazon Kinesis Data Streams Service API Reference - *

                              Amazon Kinesis Data Streams is a managed service that scales elastically for - * real-time processing of streaming big data.

                              + *

                              Amazon Kinesis Data Streams is a managed service that scales elastically for real-time + * processing of streaming big data.

                              */ export class Kinesis extends KinesisClient { /** - *

                              Adds or updates tags for the specified Kinesis data stream. Each time you invoke - * this operation, you can specify up to 10 tags. If you want to add more than 10 tags to - * your stream, you can invoke this operation multiple times. In total, each stream can - * have up to 50 tags.

                              + *

                              Adds or updates tags for the specified Kinesis data stream. You can assign up to 50 + * tags to a data stream.

                              *

                              If tags have already been assigned to the stream, AddTagsToStream * overwrites any existing tags that correspond to the specified tag keys.
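A minimal sketch of the call described above; the region, stream name, and tag values are placeholders, and re-sending an existing key overwrites its value as the doc states:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

async function tagStream(): Promise<void> {
  await kinesis.addTagsToStream({
    StreamName: "example-stream", // placeholder
    Tags: { environment: "test", team: "analytics" }, // up to 50 tags per stream
  });
}
```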

                              *

                              @@ -161,19 +164,19 @@ export class Kinesis extends KinesisClient { } /** - *

                              Creates a Kinesis data stream. A stream captures and transports data records that - * are continuously emitted from different data sources or producers. + *

                              Creates a Kinesis data stream. A stream captures and transports data records that are + * continuously emitted from different data sources or producers. * Scale-out within a stream is explicitly supported by means of shards, which are uniquely * identified groups of data records in a stream.

                              - *

                              You specify and control the number of shards that a stream is composed of. Each - * shard can support reads up to five transactions per second, up to a maximum data read - * total of 2 MiB per second. Each shard can support writes up to 1,000 records per second, - * up to a maximum data write total of 1 MiB per second. If the amount of data input - * increases or decreases, you can add or remove shards.

                              - *

                              The stream name identifies the stream. The name is scoped to the AWS account used - * by the application. It is also scoped by AWS Region. That is, two streams in two - * different accounts can have the same name, and two streams in the same account, but in - * two different Regions, can have the same name.

                              + *

                              You specify and control the number of shards that a stream is composed of. Each shard + * can support reads up to five transactions per second, up to a maximum data read total of + * 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a + * maximum data write total of 1 MiB per second. If the amount of data input increases or + * decreases, you can add or remove shards.

                              + *

                              The stream name identifies the stream. The name is scoped to the Amazon Web Services + * account used by the application. It is also scoped by Amazon Web Services Region. That + * is, two streams in two different accounts can have the same name, and two streams in the + * same account, but in two different Regions, can have the same name.

                              *

                              * CreateStream is an asynchronous operation. Upon receiving a * CreateStream request, Kinesis Data Streams immediately returns and sets @@ -185,20 +188,20 @@ export class Kinesis extends KinesisClient { *

                                *
                              • * - *

                                Have more than five streams in the CREATING state at any point - * in time.

                                + *

                                Have more than five streams in the CREATING state at any point in + * time.

                                *
                              • *
                              • * *

                                Create more shards than are authorized for your account.

                                *
                              • *
                              - *

                              For the default shard limit for an AWS account, see Amazon Kinesis Data Streams - * Limits in the Amazon Kinesis Data Streams Developer - * Guide. To increase this limit, contact AWS - * Support.

                              - *

                              You can use DescribeStream to check the stream status, which is - * returned in StreamStatus.

                              + *

                              For the default shard limit for an Amazon Web Services account, see Amazon + * Kinesis Data Streams Limits in the Amazon Kinesis Data Streams + * Developer Guide. To increase this limit, contact Amazon Web Services + * Support.

                              + *

                              You can use DescribeStreamSummary to check the stream status, which + * is returned in StreamStatus.

                              *

                              * CreateStream has a limit of five transactions per second per * account.
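A minimal sketch of creating a provisioned-capacity stream and then polling its status with DescribeStreamSummary, as the revised text recommends; the region, stream name, and shard count are illustrative:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

async function createAndWait(): Promise<void> {
  await kinesis.createStream({ StreamName: "example-stream", ShardCount: 2 });
  // CreateStream is asynchronous; the stream stays CREATING until it becomes ACTIVE.
  let status: string | undefined;
  do {
    await new Promise((resolve) => setTimeout(resolve, 5000));
    const out = await kinesis.describeStreamSummary({ StreamName: "example-stream" });
    status = out.StreamDescriptionSummary?.StreamStatus;
  } while (status === "CREATING");
}
```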

                              @@ -230,12 +233,12 @@ export class Kinesis extends KinesisClient { } /** - *

                              Decreases the Kinesis data stream's retention period, which is the length of time - * data records are accessible after they are added to the stream. The minimum value of a + *

                              Decreases the Kinesis data stream's retention period, which is the length of time data + * records are accessible after they are added to the stream. The minimum value of a * stream's retention period is 24 hours.

                              - *

                              This operation may result in lost data. For example, if the stream's retention - * period is 48 hours and is decreased to 24 hours, any data already in the stream that is - * older than 24 hours is inaccessible.

                              + *

                              This operation may result in lost data. For example, if the stream's retention period + * is 48 hours and is decreased to 24 hours, any data already in the stream that is older + * than 24 hours is inaccessible.
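A minimal sketch of the call; 24 hours is the documented floor, and the region and stream name are placeholders:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

// Records older than the new retention period become inaccessible, as noted above.
kinesis
  .decreaseStreamRetentionPeriod({ StreamName: "example-stream", RetentionPeriodHours: 24 })
  .catch(console.error);
```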

                              */ public decreaseStreamRetentionPeriod( args: DecreaseStreamRetentionPeriodCommandInput, @@ -279,10 +282,11 @@ export class Kinesis extends KinesisClient { * Note: Kinesis Data Streams might continue to accept * data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the * DELETING state until the stream deletion is complete.

                              - *

                              When you delete a stream, any shards in that stream are also deleted, and any tags - * are dissociated from the stream.

                              - *

                              You can use the DescribeStream operation to check the state of - * the stream, which is returned in StreamStatus.

                              + *

                              When you delete a stream, any shards in that stream are also deleted, and any tags are + * dissociated from the stream.

                              + *

                              You can use the DescribeStreamSummary operation to check the state + * of the stream, which is returned in StreamStatus.

                              + * *

                              * DeleteStream has a limit of five transactions per second per * account.
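A minimal sketch; the stream enters the DELETING state and, as the revised text notes, its state can be watched with DescribeStreamSummary. The region and stream name are placeholders:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

// EnforceConsumerDeletion also removes any registered consumers before deletion.
kinesis
  .deleteStream({ StreamName: "example-stream", EnforceConsumerDeletion: true })
  .catch(console.error);
```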

                              @@ -389,7 +393,11 @@ export class Kinesis extends KinesisClient { /** *

                              Describes the specified Kinesis data stream.

                              - * + * + *

                              This API has been revised. It's highly recommended that you use the DescribeStreamSummary API to get a summarized description of the + * specified Kinesis data stream and the ListShards API to list the + * shards in a specified data stream and obtain information about each shard.

                              + *
                              *

                              The information returned includes the stream name, Amazon Resource Name (ARN), * creation time, enhanced metric configuration, and shard map. The shard map is an array * of shard objects. For each shard object, there is the hash key and sequence number @@ -477,12 +485,12 @@ export class Kinesis extends KinesisClient { /** *

                              Provides a summarized description of the specified Kinesis data stream without the * shard list.

                              - *

                              The information returned includes the stream name, Amazon Resource Name (ARN), - * status, record retention period, approximate creation time, monitoring, encryption - * details, and open shard count.

                              + *

                              The information returned includes the stream name, Amazon Resource Name (ARN), status, + * record retention period, approximate creation time, monitoring, encryption details, and + * open shard count.

                              *

                              - * DescribeStreamSummary has a limit of 20 transactions per second - * per account.

                              + * DescribeStreamSummary has a limit of 20 transactions per second per + * account.
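Since several of the rewritten doc blocks now point at DescribeStreamSummary instead of DescribeStream, a minimal sketch of the summary call; the region and stream name are placeholders:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

async function showSummary(): Promise<void> {
  const { StreamDescriptionSummary } = await kinesis.describeStreamSummary({
    StreamName: "example-stream", // placeholder
  });
  console.log(
    StreamDescriptionSummary?.StreamStatus,
    StreamDescriptionSummary?.OpenShardCount,
    StreamDescriptionSummary?.RetentionPeriodHours
  );
}
```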

                              */ public describeStreamSummary( args: DescribeStreamSummaryCommandInput, @@ -596,34 +604,44 @@ export class Kinesis extends KinesisClient { * You can terminate the loop when the shard is closed, or when the shard iterator reaches * the record with the sequence number or other attribute that marks it as the last record * to process.

                              - *

                              Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB - * per second. You can ensure that your calls don't exceed the maximum supported size or + *

                              Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per + * second. You can ensure that your calls don't exceed the maximum supported size or * throughput by using the Limit parameter to specify the maximum number of * records that GetRecords can return. Consider your average record size * when determining this limit. The maximum number of records that can be returned per call * is 10,000.

                              * - *

                              The size of the data returned by GetRecords varies depending on - * the utilization of the shard. The maximum size of data that GetRecords - * can return is 10 MiB. If a call returns this amount of data, subsequent calls made - * within the next 5 seconds throw ProvisionedThroughputExceededException. If - * there is insufficient provisioned throughput on the stream, subsequent calls made within - * the next 1 second throw ProvisionedThroughputExceededException. GetRecords doesn't return any data when it throws an exception. For this - * reason, we recommend that you wait 1 second between calls to GetRecords. However, it's possible that the application will get exceptions for longer than 1 - * second.

                              + *

                              The size of the data returned by GetRecords varies depending on the + * utilization of the shard. It is recommended that consumer applications retrieve records + * via the GetRecords command using the 5 TPS limit to remain caught up. + * Retrieving records less frequently can lead to consumer applications falling behind. The + * maximum size of data that GetRecords can return is 10 MiB. If a call + * returns this amount of data, subsequent calls made within the next 5 seconds throw + * ProvisionedThroughputExceededException. If there is insufficient + * provisioned throughput on the stream, subsequent calls made within the next 1 second + * throw ProvisionedThroughputExceededException. GetRecords + * doesn't return any data when it throws an exception. For this reason, we recommend that + * you wait 1 second between calls to GetRecords. However, it's possible + * that the application will get exceptions for longer than 1 second.

+ *

                              To detect whether the application is falling behind in processing, you can use the * MillisBehindLatest response attribute. You can also monitor the stream * using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon * Kinesis Data Streams Developer Guide).

                              - *

                              Each Amazon Kinesis record includes a value, - * ApproximateArrivalTimestamp, that is set when a stream successfully - * receives and stores a record. This is commonly referred to as a server-side time stamp, - * whereas a client-side time stamp is set when a data producer creates or sends the record - * to a stream (a data producer is any data source putting data records into a stream, for - * example with PutRecords). The time stamp has millisecond precision. - * There are no guarantees about the time stamp accuracy, or that the time stamp is always - * increasing. For example, records in a shard or across a stream might have time stamps - * that are out of order.

                              + *

                              Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, + * that is set when a stream successfully receives and stores a record. This is commonly + * referred to as a server-side time stamp, whereas a client-side time stamp is set when a + * data producer creates or sends the record to a stream (a data producer is any data + * source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time + * stamp accuracy, or that the time stamp is always increasing. For example, records in a + * shard or across a stream might have time stamps that are out of order.

                              *

                              This operation has a limit of five transactions per second per shard.
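A minimal polling sketch for the behavior described above; it assumes startingIterator came from GetShardIterator (see the next sketch) and sleeps roughly one second between calls, per the guidance on call frequency. The region is a placeholder:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

async function pollShard(startingIterator: string): Promise<void> {
  let iterator: string | undefined = startingIterator;
  while (iterator) {
    const out = await kinesis.getRecords({ ShardIterator: iterator, Limit: 1000 });
    for (const record of out.Records ?? []) {
      // record.Data is a Uint8Array; ApproximateArrivalTimestamp is the server-side time stamp.
      console.log(record.SequenceNumber, record.ApproximateArrivalTimestamp);
    }
    console.log("ms behind latest:", out.MillisBehindLatest);
    iterator = out.NextShardIterator;
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }
}
```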

                              */ public getRecords(args: GetRecordsCommandInput, options?: __HttpHandlerOptions): Promise; @@ -650,13 +668,13 @@ export class Kinesis extends KinesisClient { } /** - *

                              Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it - * is returned to the requester.

                              - *

                              A shard iterator specifies the shard position from which to start reading data - * records sequentially. The position is specified using the sequence number of a data - * record in a shard. A sequence number is the identifier associated with every record - * ingested in the stream, and is assigned when a record is put into the stream. Each - * stream has one or more shards.

                              + *

                              Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is + * returned to the requester.

                              + *

                              A shard iterator specifies the shard position from which to start reading data records + * sequentially. The position is specified using the sequence number of a data record in a + * shard. A sequence number is the identifier associated with every record ingested in the + * stream, and is assigned when a record is put into the stream. Each stream has one or + * more shards.
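A minimal sketch of obtaining an iterator for one shard; the region, stream name, and shard ID are placeholders, and TRIM_HORIZON is just one of the iterator types described above:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

async function getIterator(): Promise<string | undefined> {
  const out = await kinesis.getShardIterator({
    StreamName: "example-stream",      // placeholder
    ShardId: "shardId-000000000000",   // placeholder shard ID, e.g. from ListShards
    ShardIteratorType: "TRIM_HORIZON", // or LATEST, AT_SEQUENCE_NUMBER, AT_TIMESTAMP, ...
  });
  return out.ShardIterator; // expires 5 minutes after it is returned
}
```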

                              *

                              You must specify the shard iterator type. For example, you can set the * ShardIteratorType parameter to read exactly from the position denoted * by a specific sequence number by using the AT_SEQUENCE_NUMBER shard @@ -715,9 +733,9 @@ export class Kinesis extends KinesisClient { } /** - *

                              Increases the Kinesis data stream's retention period, which is the length of time - * data records are accessible after they are added to the stream. The maximum value of a - * stream's retention period is 168 hours (7 days).

                              + *

                              Increases the Kinesis data stream's retention period, which is the length of time data + * records are accessible after they are added to the stream. The maximum value of a + * stream's retention period is 8760 hours (365 days).
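A minimal sketch; 168 hours (7 days) is an illustrative value within the new 8760-hour ceiling, and the region and stream name are placeholders:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

kinesis
  .increaseStreamRetentionPeriod({ StreamName: "example-stream", RetentionPeriodHours: 168 })
  .catch(console.error);
```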

                              *

                              If you choose a longer stream retention period, this operation increases the time * period during which records that have not yet expired are accessible. However, it does * not make previous, expired data (older than the stream's previous retention period) @@ -755,8 +773,10 @@ export class Kinesis extends KinesisClient { } /** - *

                              Lists the shards in a stream and provides information about each shard. This - * operation has a limit of 100 transactions per second per data stream.

                              + *

                              Lists the shards in a stream and provides information about each shard. This operation + * has a limit of 1000 transactions per second per data stream.

                              + *

                              This action does not list expired shards. For information about expired shards, see + * Data Routing, Data Persistence, and Shard State after a Reshard.
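A minimal sketch that pages through all open shards; note that when NextToken is supplied the stream name must be omitted. The region and stream name are placeholders:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

async function listAllShards(): Promise<void> {
  let nextToken: string | undefined;
  do {
    const out = await kinesis.listShards(
      nextToken ? { NextToken: nextToken } : { StreamName: "example-stream" }
    );
    for (const shard of out.Shards ?? []) {
      console.log(shard.ShardId, shard.HashKeyRange?.StartingHashKey);
    }
    nextToken = out.NextToken;
  } while (nextToken);
}
```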

                              * *

                              This API is a new operation that is used by the Amazon Kinesis Client Library * (KCL). If you have a fine-grained IAM policy that only allows specific operations, @@ -828,7 +848,7 @@ export class Kinesis extends KinesisClient { * ListStreams. You can limit the number of returned streams using the * Limit parameter. If you do not specify a value for the * Limit parameter, Kinesis Data Streams uses the default limit, which is - * currently 10.

                              + * currently 100.

                              *

                              You can detect if there are more streams available to list by using the * HasMoreStreams flag from the returned output. If there are more streams * available, you can request more streams by using the name of the last stream returned by @@ -917,7 +937,7 @@ export class Kinesis extends KinesisClient { * UPDATING, or DELETING state, MergeShards * returns a ResourceInUseException. If the specified stream does not exist, * MergeShards returns a ResourceNotFoundException.

                              - *

                              You can use DescribeStream to check the state of the stream, + *

                              You can use DescribeStreamSummary to check the state of the stream, * which is returned in StreamStatus.

                              *

                              * MergeShards is an asynchronous operation. Upon receiving a @@ -926,13 +946,13 @@ export class Kinesis extends KinesisClient { * operation is completed, Kinesis Data Streams sets the StreamStatus to * ACTIVE. Read and write operations continue to work while the stream is * in the UPDATING state.

                              - *

                              You use DescribeStream to determine the shard IDs that are - * specified in the MergeShards request.

                              + *

                              You use DescribeStreamSummary and the ListShards + * APIs to determine the shard IDs that are specified in the MergeShards + * request.

                              *

                              If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards, * or SplitShard, you receive a LimitExceededException.

                              *

                              - * MergeShards has a limit of five transactions per second per - * account.

                              + * MergeShards has a limit of five transactions per second per account.
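A minimal sketch of the merge described above; the shard IDs must be adjacent and are placeholders here, and the stream state can then be watched with DescribeStreamSummary as the revised text says:

```ts
import { Kinesis } from "@aws-sdk/client-kinesis";

const kinesis = new Kinesis({ region: "us-west-2" }); // placeholder region

async function merge(): Promise<void> {
  await kinesis.mergeShards({
    StreamName: "example-stream",                 // placeholder
    ShardToMerge: "shardId-000000000000",         // placeholder, e.g. from ListShards
    AdjacentShardToMerge: "shardId-000000000001", // placeholder, must be adjacent
  });
  const out = await kinesis.describeStreamSummary({ StreamName: "example-stream" });
  console.log(out.StreamDescriptionSummary?.StreamStatus); // UPDATING until the merge finishes
}
```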

                              */ public mergeShards(args: MergeShardsCommandInput, options?: __HttpHandlerOptions): Promise; public mergeShards(args: MergeShardsCommandInput, cb: (err: any, data?: MergeShardsCommandOutput) => void): void; @@ -970,18 +990,18 @@ export class Kinesis extends KinesisClient { * Kinesis Data Streams segregates the data records that belong to a stream into multiple * shards, using the partition key associated with each data record to determine the shard * to which a given data record belongs.

                              - *

                              Partition keys are Unicode strings, with a maximum length limit of 256 characters - * for each key. An MD5 hash function is used to map partition keys to 128-bit integer - * values and to map associated data records to shards using the hash key ranges of the - * shards. You can override hashing the partition key to determine the shard by explicitly + *

                              Partition keys are Unicode strings, with a maximum length limit of 256 characters for + * each key. An MD5 hash function is used to map partition keys to 128-bit integer values + * and to map associated data records to shards using the hash key ranges of the shards. + * You can override hashing the partition key to determine the shard by explicitly * specifying a hash value using the ExplicitHashKey parameter. For more * information, see Adding Data to a Stream in the Amazon Kinesis Data Streams * Developer Guide.

                              *

                              * PutRecord returns the shard ID of where the data record was placed and the * sequence number that was assigned to the data record.

                              - *

                              Sequence numbers increase over time and are specific to a shard within a stream, - * not across all shards within a stream. To guarantee strictly increasing ordering, write + *

                              Sequence numbers increase over time and are specific to a shard within a stream, not + * across all shards within a stream. To guarantee strictly increasing ordering, write * serially to a shard and use the SequenceNumberForOrdering parameter. For * more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams * Developer Guide.

                              @@ -992,8 +1012,8 @@ export class Kinesis extends KinesisClient { *

                              If a PutRecord request cannot be processed because of insufficient * provisioned throughput on the shard involved in the request, PutRecord * throws ProvisionedThroughputExceededException.

- * By default, data records are accessible for 24 hours from the time that they are
- * added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
+ * By default, data records are accessible for 24 hours from the time that they are added
+ * to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
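A minimal PutRecord sketch against the bare-bones client, assuming a hypothetical stream name and partition key:

```ts
import { KinesisClient, PutRecordCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

export async function putSingleRecord(): Promise<void> {
  // The partition key selects the shard; the response carries the shard ID and sequence number.
  const out = await client.send(
    new PutRecordCommand({
      StreamName: "example-stream", // hypothetical stream
      PartitionKey: "user-1234",
      Data: new TextEncoder().encode(JSON.stringify({ event: "click", ts: Date.now() })),
    })
  );
  console.log(out.ShardId, out.SequenceNumber);
}
```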

                              */ public putRecord(args: PutRecordCommandInput, options?: __HttpHandlerOptions): Promise; public putRecord(args: PutRecordCommandInput, cb: (err: any, data?: PutRecordCommandOutput) => void): void; @@ -1022,8 +1042,8 @@ export class Kinesis extends KinesisClient { *

                              Writes multiple data records into a Kinesis data stream in a single call (also * referred to as a PutRecords request). Use this operation to send data into * the stream for data ingestion and processing.

- * Each PutRecords request can support up to 500 records. Each record in
- * the request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,
+ * Each PutRecords request can support up to 500 records. Each record in the
+ * request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,
 * including partition keys. Each shard can support writes up to 1,000 records per second,
 * up to a maximum data write total of 1 MiB per second.

                              *

                              You must specify the name of the stream that captures, stores, and transports the @@ -1048,9 +1068,9 @@ export class Kinesis extends KinesisClient { * record in the request array using natural ordering, from the top to the bottom of the * request and response. The response Records array always includes the same * number of records as the request array.

- * The response Records array includes both successfully and
- * unsuccessfully processed records. Kinesis Data Streams attempts to process all records
- * in each PutRecords request. A single record failure does not stop the
+ * The response Records array includes both successfully and unsuccessfully
+ * processed records. Kinesis Data Streams attempts to process all records in each
+ * PutRecords request. A single record failure does not stop the
 * processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering
 * of records. If you need to read records in the same order they are written to the
 * stream, use PutRecord instead of PutRecords, and write to
@@ -1073,8 +1093,8 @@ export class Kinesis extends KinesisClient {
 *

                              After you write a record to a stream, you cannot modify that record or its order * within the stream.

                              *
- * By default, data records are accessible for 24 hours from the time that they are
- * added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
+ * By default, data records are accessible for 24 hours from the time that they are added
+ * to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.
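A batched write along the same lines; the names are placeholders, and the failure-count check mirrors the per-record retry guidance above:

```ts
import { KinesisClient, PutRecordsCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function putBatch(): Promise<void> {
  const out = await client.send(
    new PutRecordsCommand({
      StreamName: "example-stream", // hypothetical
      Records: [
        { PartitionKey: "a", Data: new TextEncoder().encode("first") },
        { PartitionKey: "b", Data: new TextEncoder().encode("second") },
      ],
    })
  );
  // FailedRecordCount > 0 means some entries were rejected and should be retried individually.
  console.log(out.FailedRecordCount, out.Records?.length);
}
```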

                              */ public putRecords(args: PutRecordsCommandInput, options?: __HttpHandlerOptions): Promise; public putRecords(args: PutRecordsCommandInput, cb: (err: any, data?: PutRecordsCommandOutput) => void): void; @@ -1192,38 +1212,36 @@ export class Kinesis extends KinesisClient { * SplitShard to increase stream capacity, so that more Kinesis Data * Streams applications can simultaneously read data from the stream for real-time * processing.

- * You must specify the shard to be split and the new hash key, which is the position
- * in the shard where the shard gets split in two. In many cases, the new hash key might be
+ * You must specify the shard to be split and the new hash key, which is the position in
+ * the shard where the shard gets split in two. In many cases, the new hash key might be
 * the average of the beginning and ending hash key, but it can be any hash key value in
 * the range being mapped into the shard. For more information, see Split a
 * Shard in the Amazon Kinesis Data Streams Developer
 * Guide.

- * You can use DescribeStream to determine the shard ID and hash key
- * values for the ShardToSplit and NewStartingHashKey parameters
- * that are specified in the SplitShard request.
+ * You can use DescribeStreamSummary and the ListShards APIs to determine the shard ID and hash key values for the ShardToSplit
+ * and NewStartingHashKey parameters that are specified in the
+ * SplitShard request.

                              *

                              * SplitShard is an asynchronous operation. Upon receiving a * SplitShard request, Kinesis Data Streams immediately returns a response * and sets the stream status to UPDATING. After the operation is completed, * Kinesis Data Streams sets the stream status to ACTIVE. Read and write * operations continue to work while the stream is in the UPDATING state.

- * You can use DescribeStream to check the status of the stream, which is
- * returned in StreamStatus. If the stream is in the ACTIVE
- * state, you can call SplitShard. If a stream is in CREATING or
- * UPDATING or DELETING states, DescribeStream
- * returns a ResourceInUseException.
- *
- * If the specified stream does not exist, DescribeStream returns a
- * ResourceNotFoundException. If you try to create more shards than are
- * authorized for your account, you receive a LimitExceededException.
- *
- * For the default shard limit for an AWS account, see Kinesis Data Streams
- * Limits in the Amazon Kinesis Data Streams Developer
- * Guide. To increase this limit, contact AWS
- * Support.
+ * You can use DescribeStreamSummary to check the status of the stream,
+ * which is returned in StreamStatus. If the stream is in the
+ * ACTIVE state, you can call SplitShard.
+ *
+ * If the specified stream does not exist, DescribeStreamSummary
+ * returns a ResourceNotFoundException. If you try to create more shards than
+ * are authorized for your account, you receive a LimitExceededException.
+ *
+ * For the default shard limit for an Amazon Web Services account, see Kinesis
+ * Data Streams Limits in the Amazon Kinesis Data Streams Developer
+ * Guide. To increase this limit, contact Amazon Web Services
+ * Support.

                              *

                              If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a * LimitExceededException.

                              *

- * SplitShard has a limit of five transactions per second per
- * account.
+ * SplitShard has a limit of five transactions per second per account.
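A sketch of the split described above; the hash key shown is simply the midpoint of the full 128-bit range, and the identifiers are placeholders:

```ts
import { KinesisClient, SplitShardCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function splitBusyShard(): Promise<void> {
  // NewStartingHashKey must fall inside the parent shard's hash key range;
  // the midpoint of that range is a common choice.
  await client.send(
    new SplitShardCommand({
      StreamName: "example-stream",         // hypothetical
      ShardToSplit: "shardId-000000000000", // hypothetical
      NewStartingHashKey: "170141183460469231731687303715884105728",
    })
  );
}
```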

                              */ public splitShard(args: SplitShardCommandInput, options?: __HttpHandlerOptions): Promise; public splitShard(args: SplitShardCommandInput, cb: (err: any, data?: SplitShardCommandOutput) => void): void; @@ -1249,22 +1267,22 @@ export class Kinesis extends KinesisClient { } /** - *

- * Enables or updates server-side encryption using an AWS KMS key for a specified
- * stream.
- *
- * Starting encryption is an asynchronous operation. Upon receiving the request,
- * Kinesis Data Streams returns immediately and sets the status of the stream to
+ * Enables or updates server-side encryption using an Amazon Web Services KMS key for a
+ * specified stream.
+ *
+ * Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis
+ * Data Streams returns immediately and sets the status of the stream to
 * UPDATING. After the update is complete, Kinesis Data Streams sets the
 * status of the stream back to ACTIVE. Updating or applying encryption
 * normally takes a few seconds to complete, but it can take minutes. You can continue to
 * read and write data to your stream while its status is UPDATING. Once the
 * status of the stream is ACTIVE, encryption begins for records written to
 * the stream.

- * API Limits: You can successfully apply a new AWS KMS key for server-side encryption
- * 25 times in a rolling 24-hour period.
- *
- * Note: It can take up to 5 seconds after the stream is in an ACTIVE
- * status before all records written to the stream are encrypted. After you enable
- * encryption, you can verify that encryption is applied by inspecting the API response
- * from PutRecord or PutRecords.
+ * API Limits: You can successfully apply a new Amazon Web Services KMS key for
+ * server-side encryption 25 times in a rolling 24-hour period.
+ *
+ * Note: It can take up to 5 seconds after the stream is in an ACTIVE status
+ * before all records written to the stream are encrypted. After you enable encryption, you
+ * can verify that encryption is applied by inspecting the API response from
+ * PutRecord or PutRecords.
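A sketch of turning encryption on, assuming the Amazon Web Services managed key alias; a customer managed KMS key ARN can be swapped in, and all names are placeholders:

```ts
import { KinesisClient, StartStreamEncryptionCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function enableEncryption(): Promise<void> {
  await client.send(
    new StartStreamEncryptionCommand({
      StreamName: "example-stream", // hypothetical
      EncryptionType: "KMS",
      KeyId: "alias/aws/kinesis",   // or a customer managed KMS key ARN
    })
  );
  // The stream stays readable and writable while its status is UPDATING.
}
```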

                              */ public startStreamEncryption( args: StartStreamEncryptionCommandInput, @@ -1297,21 +1315,20 @@ export class Kinesis extends KinesisClient { /** *

                              Disables server-side encryption for a specified stream.

- * Stopping encryption is an asynchronous operation. Upon receiving the request,
- * Kinesis Data Streams returns immediately and sets the status of the stream to
+ * Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis
+ * Data Streams returns immediately and sets the status of the stream to
 * UPDATING. After the update is complete, Kinesis Data Streams sets the
 * status of the stream back to ACTIVE. Stopping encryption normally takes a
 * few seconds to complete, but it can take minutes. You can continue to read and write
 * data to your stream while its status is UPDATING. Once the status of the
 * stream is ACTIVE, records written to the stream are no longer encrypted by
 * Kinesis Data Streams.

- * API Limits: You can successfully disable server-side encryption 25 times in a
- * rolling 24-hour period.
- *
- * Note: It can take up to 5 seconds after the stream is in an ACTIVE
- * status before all records written to the stream are no longer subject to encryption.
- * After you disabled encryption, you can verify that encryption is not applied by
- * inspecting the API response from PutRecord or
- * PutRecords.
+ * API Limits: You can successfully disable server-side encryption 25 times in a rolling
+ * 24-hour period.
+ *
+ * Note: It can take up to 5 seconds after the stream is in an ACTIVE status
+ * before all records written to the stream are no longer subject to encryption. After you
+ * disabled encryption, you can verify that encryption is not applied by inspecting the API
+ * response from PutRecord or PutRecords.
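The mirror-image call for turning encryption off; EncryptionType and KeyId must match the values currently in effect, and the identifiers here are placeholders:

```ts
import { KinesisClient, StopStreamEncryptionCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function disableEncryption(): Promise<void> {
  await client.send(
    new StopStreamEncryptionCommand({
      StreamName: "example-stream", // hypothetical
      EncryptionType: "KMS",
      KeyId: "alias/aws/kinesis",   // must match the key used to start encryption
    })
  );
}
```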

                              */ public stopStreamEncryption( args: StopStreamEncryptionCommandInput, @@ -1361,8 +1378,9 @@ export class Kinesis extends KinesisClient { *

 * If you call SubscribeToShard again with the same ConsumerARN
 * and ShardId within 5 seconds of a successful call, you'll get a
 * ResourceInUseException. If you call SubscribeToShard 5
- * seconds or more after a successful call, the first connection will expire and the second
- * call will take over the subscription.
+ * seconds or more after a successful call, the second call takes over the subscription and
+ * the previous connection expires or fails with a
+ * ResourceInUseException.

                              *

 * For an example of how to use this operation, see Enhanced Fan-Out
 * Using the Kinesis Data Streams API.
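A sketch of consuming the push-based subscription; the consumer ARN is hypothetical, and the event-stream iteration shape shown here is an assumption based on the SDK's event-stream unions rather than something this patch specifies:

```ts
import { KinesisClient, SubscribeToShardCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function fanOutRead(): Promise<void> {
  // ConsumerARN comes from a prior RegisterStreamConsumer call (not shown); values are placeholders.
  const out = await client.send(
    new SubscribeToShardCommand({
      ConsumerARN: "arn:aws:kinesis:us-west-2:123456789012:stream/example-stream/consumer/app:1",
      ShardId: "shardId-000000000000",
      StartingPosition: { Type: "LATEST" },
    })
  );
  // EventStream is an async iterable of push events; a subscription lasts up to 5 minutes.
  for await (const event of out.EventStream ?? []) {
    const records = event.SubscribeToShardEvent?.Records ?? [];
    console.log(records.length, event.SubscribeToShardEvent?.MillisBehindLatest);
  }
}
```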

                              */ @@ -1408,8 +1426,8 @@ export class Kinesis extends KinesisClient { * individual shards. This can cause short-lived shards to be created, in addition to the * final shards. These short-lived shards count towards your total shard limit for your * account in the Region.

- * When using this operation, we recommend that you specify a target shard count that
- * is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your
+ * When using this operation, we recommend that you specify a target shard count that is
+ * a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your
 * shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling
 * action might take longer to complete.

                              *

 * This operation has the following default limits. By default, you cannot do the
@@ -1419,27 +1437,26 @@ export class Kinesis extends KinesisClient {
 *   • Scale more than ten times per rolling 24-hour period per stream
- *   • Scale up to more than double your current shard count for a
- *     stream
+ *   • Scale up to more than double your current shard count for a stream
 *   • Scale down below half your current shard count for a stream
- *   • Scale up to more than 500 shards in a stream
+ *   • Scale up to more than 10000 shards in a stream
- *   • Scale a stream with more than 500 shards down unless the result is less
- *     than 500 shards
+ *   • Scale a stream with more than 10000 shards down unless the result is less than
+ *     10000 shards
 *   • Scale up to more than the shard limit for your account
- * For the default limits for an AWS account, see Streams Limits in the
- * Amazon Kinesis Data Streams Developer Guide. To request an
- * increase in the call rate limit, the shard limit for this API, or your overall shard
- * limit, use the limits form.
+ * For the default limits for an Amazon Web Services account, see Streams
+ * Limits in the Amazon Kinesis Data Streams Developer
+ * Guide. To request an increase in the call rate limit, the shard limit for
+ * this API, or your overall shard limit, use the limits form.
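A sketch of a uniform scaling request with placeholder values; targets that are multiples of 25% of the current shard count complete fastest:

```ts
import { KinesisClient, UpdateShardCountCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function doubleCapacity(): Promise<void> {
  const out = await client.send(
    new UpdateShardCountCommand({
      StreamName: "example-stream", // hypothetical
      TargetShardCount: 4,          // e.g. scaling up from 2 shards
      ScalingType: "UNIFORM_SCALING",
    })
  );
  console.log(out.CurrentShardCount, out.TargetShardCount);
}
```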

                            */ public updateShardCount( args: UpdateShardCountCommandInput, @@ -1469,4 +1486,39 @@ export class Kinesis extends KinesisClient { return this.send(command, optionsOrCb); } } + + /** + *

+ * Updates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you
+ * can choose between an on-demand capacity mode and a
+ * provisioned capacity mode for your data stream.
+ *
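A sketch of the newly added operation, assuming it is addressed by stream ARN with an ON_DEMAND stream mode; the ARN is a placeholder:

```ts
import { KinesisClient, UpdateStreamModeCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function switchToOnDemand(): Promise<void> {
  // This operation addresses the stream by ARN rather than by name.
  await client.send(
    new UpdateStreamModeCommand({
      StreamARN: "arn:aws:kinesis:us-west-2:123456789012:stream/example-stream", // hypothetical
      StreamModeDetails: { StreamMode: "ON_DEMAND" },
    })
  );
}
```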

                            + */ + public updateStreamMode( + args: UpdateStreamModeCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateStreamMode( + args: UpdateStreamModeCommandInput, + cb: (err: any, data?: UpdateStreamModeCommandOutput) => void + ): void; + public updateStreamMode( + args: UpdateStreamModeCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateStreamModeCommandOutput) => void + ): void; + public updateStreamMode( + args: UpdateStreamModeCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateStreamModeCommandOutput) => void), + cb?: (err: any, data?: UpdateStreamModeCommandOutput) => void + ): Promise | void { + const command = new UpdateStreamModeCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } } diff --git a/clients/client-kinesis/src/KinesisClient.ts b/clients/client-kinesis/src/KinesisClient.ts index 942a2094cf08..9955b9ed9204 100644 --- a/clients/client-kinesis/src/KinesisClient.ts +++ b/clients/client-kinesis/src/KinesisClient.ts @@ -119,6 +119,7 @@ import { } from "./commands/StopStreamEncryptionCommand"; import { SubscribeToShardCommandInput, SubscribeToShardCommandOutput } from "./commands/SubscribeToShardCommand"; import { UpdateShardCountCommandInput, UpdateShardCountCommandOutput } from "./commands/UpdateShardCountCommand"; +import { UpdateStreamModeCommandInput, UpdateStreamModeCommandOutput } from "./commands/UpdateStreamModeCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = @@ -149,7 +150,8 @@ export type ServiceInputTypes = | StartStreamEncryptionCommandInput | StopStreamEncryptionCommandInput | SubscribeToShardCommandInput - | UpdateShardCountCommandInput; + | UpdateShardCountCommandInput + | UpdateStreamModeCommandInput; export type ServiceOutputTypes = | AddTagsToStreamCommandOutput @@ -179,7 +181,8 @@ export type ServiceOutputTypes = | StartStreamEncryptionCommandOutput | StopStreamEncryptionCommandOutput | SubscribeToShardCommandOutput - | UpdateShardCountCommandOutput; + | UpdateShardCountCommandOutput + | UpdateStreamModeCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** @@ -338,8 +341,8 @@ export interface KinesisClientResolvedConfig extends KinesisClientResolvedConfig /** * Amazon Kinesis Data Streams Service API Reference - *

- * Amazon Kinesis Data Streams is a managed service that scales elastically for
- * real-time processing of streaming big data.
+ * Amazon Kinesis Data Streams is a managed service that scales elastically for real-time
+ * processing of streaming big data.

                            */ export class KinesisClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-kinesis/src/commands/AddTagsToStreamCommand.ts b/clients/client-kinesis/src/commands/AddTagsToStreamCommand.ts index b2c91e67673a..abe13a699af2 100644 --- a/clients/client-kinesis/src/commands/AddTagsToStreamCommand.ts +++ b/clients/client-kinesis/src/commands/AddTagsToStreamCommand.ts @@ -22,10 +22,8 @@ export interface AddTagsToStreamCommandInput extends AddTagsToStreamInput {} export interface AddTagsToStreamCommandOutput extends __MetadataBearer {} /** - *

- * Adds or updates tags for the specified Kinesis data stream. Each time you invoke
- * this operation, you can specify up to 10 tags. If you want to add more than 10 tags to
- * your stream, you can invoke this operation multiple times. In total, each stream can
- * have up to 50 tags.
+ * Adds or updates tags for the specified Kinesis data stream. You can assign up to 50
+ * tags to a data stream.

                            *

                            If tags have already been assigned to the stream, AddTagsToStream * overwrites any existing tags that correspond to the specified tag keys.

                            *

                            diff --git a/clients/client-kinesis/src/commands/CreateStreamCommand.ts b/clients/client-kinesis/src/commands/CreateStreamCommand.ts index 0f3916f7f25e..5563fb34a5cb 100644 --- a/clients/client-kinesis/src/commands/CreateStreamCommand.ts +++ b/clients/client-kinesis/src/commands/CreateStreamCommand.ts @@ -22,19 +22,19 @@ export interface CreateStreamCommandInput extends CreateStreamInput {} export interface CreateStreamCommandOutput extends __MetadataBearer {} /** - *

- * Creates a Kinesis data stream. A stream captures and transports data records that
- * are continuously emitted from different data sources or producers.
+ * Creates a Kinesis data stream. A stream captures and transports data records that are
+ * continuously emitted from different data sources or producers.
 * Scale-out within a stream is explicitly supported by means of shards, which are uniquely
 * identified groups of data records in a stream.

- * You specify and control the number of shards that a stream is composed of. Each
- * shard can support reads up to five transactions per second, up to a maximum data read
- * total of 2 MiB per second. Each shard can support writes up to 1,000 records per second,
- * up to a maximum data write total of 1 MiB per second. If the amount of data input
- * increases or decreases, you can add or remove shards.
- *
- * The stream name identifies the stream. The name is scoped to the AWS account used
- * by the application. It is also scoped by AWS Region. That is, two streams in two
- * different accounts can have the same name, and two streams in the same account, but in
- * two different Regions, can have the same name.
+ * You specify and control the number of shards that a stream is composed of. Each shard
+ * can support reads up to five transactions per second, up to a maximum data read total of
+ * 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a
+ * maximum data write total of 1 MiB per second. If the amount of data input increases or
+ * decreases, you can add or remove shards.
+ *
+ * The stream name identifies the stream. The name is scoped to the Amazon Web Services
+ * account used by the application. It is also scoped by Amazon Web Services Region. That
+ * is, two streams in two different accounts can have the same name, and two streams in the
+ * same account, but in two different Regions, can have the same name.

                            *

                            * CreateStream is an asynchronous operation. Upon receiving a * CreateStream request, Kinesis Data Streams immediately returns and sets @@ -46,20 +46,20 @@ export interface CreateStreamCommandOutput extends __MetadataBearer {} *

                              *
                            • * - *

                              Have more than five streams in the CREATING state at any point - * in time.

                              + *

                              Have more than five streams in the CREATING state at any point in + * time.

                              *
                            • *
                            • * *

                              Create more shards than are authorized for your account.

                              *
                            • *
- * For the default shard limit for an AWS account, see Amazon Kinesis Data Streams
- * Limits in the Amazon Kinesis Data Streams Developer
- * Guide. To increase this limit, contact AWS
- * Support.
- *
- * You can use DescribeStream to check the stream status, which is
- * returned in StreamStatus.
+ * For the default shard limit for an Amazon Web Services account, see Amazon
+ * Kinesis Data Streams Limits in the Amazon Kinesis Data Streams
+ * Developer Guide. To increase this limit, contact Amazon Web Services
+ * Support.
+ *
+ * You can use DescribeStreamSummary to check the stream status, which
+ * is returned in StreamStatus.

                            *

                            * CreateStream has a limit of five transactions per second per * account.
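A sketch of creating a provisioned stream and then checking its status with the summary call recommended above; the names are placeholders:

```ts
import {
  CreateStreamCommand,
  DescribeStreamSummaryCommand,
  KinesisClient,
} from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function createProvisionedStream(): Promise<void> {
  await client.send(
    new CreateStreamCommand({ StreamName: "example-stream", ShardCount: 2 })
  );
  // CreateStream is asynchronous; poll the summary until StreamStatus is ACTIVE.
  const summary = await client.send(
    new DescribeStreamSummaryCommand({ StreamName: "example-stream" })
  );
  console.log(summary.StreamDescriptionSummary?.StreamStatus);
}
```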

                            diff --git a/clients/client-kinesis/src/commands/DecreaseStreamRetentionPeriodCommand.ts b/clients/client-kinesis/src/commands/DecreaseStreamRetentionPeriodCommand.ts index 257756707eb4..87f58f819cdb 100644 --- a/clients/client-kinesis/src/commands/DecreaseStreamRetentionPeriodCommand.ts +++ b/clients/client-kinesis/src/commands/DecreaseStreamRetentionPeriodCommand.ts @@ -22,12 +22,12 @@ export interface DecreaseStreamRetentionPeriodCommandInput extends DecreaseStrea export interface DecreaseStreamRetentionPeriodCommandOutput extends __MetadataBearer {} /** - *

- * Decreases the Kinesis data stream's retention period, which is the length of time
- * data records are accessible after they are added to the stream. The minimum value of a
+ * Decreases the Kinesis data stream's retention period, which is the length of time data
+ * records are accessible after they are added to the stream. The minimum value of a
 * stream's retention period is 24 hours.
- *
- * This operation may result in lost data. For example, if the stream's retention
- * period is 48 hours and is decreased to 24 hours, any data already in the stream that is
- * older than 24 hours is inaccessible.
+ *
+ * This operation may result in lost data. For example, if the stream's retention period
+ * is 48 hours and is decreased to 24 hours, any data already in the stream that is older
+ * than 24 hours is inaccessible.

                            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/DeleteStreamCommand.ts b/clients/client-kinesis/src/commands/DeleteStreamCommand.ts index f5c95bdde48e..ea4a12f90a0b 100644 --- a/clients/client-kinesis/src/commands/DeleteStreamCommand.ts +++ b/clients/client-kinesis/src/commands/DeleteStreamCommand.ts @@ -34,10 +34,11 @@ export interface DeleteStreamCommandOutput extends __MetadataBearer {} * Note: Kinesis Data Streams might continue to accept * data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the * DELETING state until the stream deletion is complete.

                            - *

                            When you delete a stream, any shards in that stream are also deleted, and any tags - * are dissociated from the stream.

                            - *

                            You can use the DescribeStream operation to check the state of - * the stream, which is returned in StreamStatus.

                            + *

                            When you delete a stream, any shards in that stream are also deleted, and any tags are + * dissociated from the stream.

                            + *

                            You can use the DescribeStreamSummary operation to check the state + * of the stream, which is returned in StreamStatus.

                            + * *

                            * DeleteStream has a limit of five transactions per second per * account.
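A deletion sketch with placeholder names; the consumer-deletion flag is optional and only needed when registered consumers would otherwise block deletion:

```ts
import { DeleteStreamCommand, KinesisClient } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function removeStream(): Promise<void> {
  await client.send(
    new DeleteStreamCommand({
      StreamName: "example-stream",  // hypothetical
      EnforceConsumerDeletion: true, // also deregisters any remaining consumers
    })
  );
}
```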

                            diff --git a/clients/client-kinesis/src/commands/DescribeStreamCommand.ts b/clients/client-kinesis/src/commands/DescribeStreamCommand.ts index a844548405d3..c7e033b5229b 100644 --- a/clients/client-kinesis/src/commands/DescribeStreamCommand.ts +++ b/clients/client-kinesis/src/commands/DescribeStreamCommand.ts @@ -23,7 +23,11 @@ export interface DescribeStreamCommandOutput extends DescribeStreamOutput, __Met /** *

                            Describes the specified Kinesis data stream.

                            - * + * + *

                            This API has been revised. It's highly recommended that you use the DescribeStreamSummary API to get a summarized description of the + * specified Kinesis data stream and the ListShards API to list the + * shards in a specified data stream and obtain information about each shard.

                            + *
                            *

                            The information returned includes the stream name, Amazon Resource Name (ARN), * creation time, enhanced metric configuration, and shard map. The shard map is an array * of shard objects. For each shard object, there is the hash key and sequence number diff --git a/clients/client-kinesis/src/commands/DescribeStreamSummaryCommand.ts b/clients/client-kinesis/src/commands/DescribeStreamSummaryCommand.ts index 877a49f628c1..6459171530ef 100644 --- a/clients/client-kinesis/src/commands/DescribeStreamSummaryCommand.ts +++ b/clients/client-kinesis/src/commands/DescribeStreamSummaryCommand.ts @@ -24,12 +24,12 @@ export interface DescribeStreamSummaryCommandOutput extends DescribeStreamSummar /** *

                            Provides a summarized description of the specified Kinesis data stream without the * shard list.

- * The information returned includes the stream name, Amazon Resource Name (ARN),
- * status, record retention period, approximate creation time, monitoring, encryption
- * details, and open shard count.
+ * The information returned includes the stream name, Amazon Resource Name (ARN), status,
+ * record retention period, approximate creation time, monitoring, encryption details, and
+ * open shard count.
 *
- * DescribeStreamSummary has a limit of 20 transactions per second
- * per account.
+ * DescribeStreamSummary has a limit of 20 transactions per second per
+ * account.

                            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/GetRecordsCommand.ts b/clients/client-kinesis/src/commands/GetRecordsCommand.ts index 17c5ebcd6e3f..13c03d87daa7 100644 --- a/clients/client-kinesis/src/commands/GetRecordsCommand.ts +++ b/clients/client-kinesis/src/commands/GetRecordsCommand.ts @@ -40,34 +40,44 @@ export interface GetRecordsCommandOutput extends GetRecordsOutput, __MetadataBea * You can terminate the loop when the shard is closed, or when the shard iterator reaches * the record with the sequence number or other attribute that marks it as the last record * to process.

                            - *

                            Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB - * per second. You can ensure that your calls don't exceed the maximum supported size or + *

                            Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per + * second. You can ensure that your calls don't exceed the maximum supported size or * throughput by using the Limit parameter to specify the maximum number of * records that GetRecords can return. Consider your average record size * when determining this limit. The maximum number of records that can be returned per call * is 10,000.

                            * - *

                            The size of the data returned by GetRecords varies depending on - * the utilization of the shard. The maximum size of data that GetRecords - * can return is 10 MiB. If a call returns this amount of data, subsequent calls made - * within the next 5 seconds throw ProvisionedThroughputExceededException. If - * there is insufficient provisioned throughput on the stream, subsequent calls made within - * the next 1 second throw ProvisionedThroughputExceededException. GetRecords doesn't return any data when it throws an exception. For this - * reason, we recommend that you wait 1 second between calls to GetRecords. However, it's possible that the application will get exceptions for longer than 1 - * second.

                            + *

                            The size of the data returned by GetRecords varies depending on the + * utilization of the shard. It is recommended that consumer applications retrieve records + * via the GetRecords command using the 5 TPS limit to remain caught up. + * Retrieving records less frequently can lead to consumer applications falling behind. The + * maximum size of data that GetRecords can return is 10 MiB. If a call + * returns this amount of data, subsequent calls made within the next 5 seconds throw + * ProvisionedThroughputExceededException. If there is insufficient + * provisioned throughput on the stream, subsequent calls made within the next 1 second + * throw ProvisionedThroughputExceededException. GetRecords + * doesn't return any data when it throws an exception. For this reason, we recommend that + * you wait 1 second between calls to GetRecords. However, it's possible + * that the application will get exceptions for longer than 1 second.

                            + * + * + * + * + * + * + * + * *

                            To detect whether the application is falling behind in processing, you can use the * MillisBehindLatest response attribute. You can also monitor the stream * using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon * Kinesis Data Streams Developer Guide).

                            - *

                            Each Amazon Kinesis record includes a value, - * ApproximateArrivalTimestamp, that is set when a stream successfully - * receives and stores a record. This is commonly referred to as a server-side time stamp, - * whereas a client-side time stamp is set when a data producer creates or sends the record - * to a stream (a data producer is any data source putting data records into a stream, for - * example with PutRecords). The time stamp has millisecond precision. - * There are no guarantees about the time stamp accuracy, or that the time stamp is always - * increasing. For example, records in a shard or across a stream might have time stamps - * that are out of order.

                            + *

                            Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, + * that is set when a stream successfully receives and stores a record. This is commonly + * referred to as a server-side time stamp, whereas a client-side time stamp is set when a + * data producer creates or sends the record to a stream (a data producer is any data + * source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time + * stamp accuracy, or that the time stamp is always increasing. For example, records in a + * shard or across a stream might have time stamps that are out of order.

                            *

                            This operation has a limit of five transactions per second per shard.
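A polling-read sketch that strings GetShardIterator and GetRecords together, pausing roughly a second between calls as recommended above; all identifiers are placeholders:

```ts
import { GetRecordsCommand, GetShardIteratorCommand, KinesisClient } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function readShardFromStart(): Promise<void> {
  const { ShardIterator } = await client.send(
    new GetShardIteratorCommand({
      StreamName: "example-stream",    // hypothetical
      ShardId: "shardId-000000000000", // hypothetical
      ShardIteratorType: "TRIM_HORIZON",
    })
  );
  let iterator = ShardIterator;
  // The loop runs until the shard is closed and NextShardIterator comes back undefined.
  while (iterator) {
    const out = await client.send(new GetRecordsCommand({ ShardIterator: iterator, Limit: 1000 }));
    for (const record of out.Records ?? []) {
      console.log(new TextDecoder().decode(record.Data));
    }
    iterator = out.NextShardIterator;
    // Waiting about a second between calls stays within the 5 reads/second/shard limit.
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }
}
```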

                            * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-kinesis/src/commands/GetShardIteratorCommand.ts b/clients/client-kinesis/src/commands/GetShardIteratorCommand.ts index 3aee92d4ee5e..9752fc725b03 100644 --- a/clients/client-kinesis/src/commands/GetShardIteratorCommand.ts +++ b/clients/client-kinesis/src/commands/GetShardIteratorCommand.ts @@ -22,13 +22,13 @@ export interface GetShardIteratorCommandInput extends GetShardIteratorInput {} export interface GetShardIteratorCommandOutput extends GetShardIteratorOutput, __MetadataBearer {} /** - *

                            Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it - * is returned to the requester.

                            - *

                            A shard iterator specifies the shard position from which to start reading data - * records sequentially. The position is specified using the sequence number of a data - * record in a shard. A sequence number is the identifier associated with every record - * ingested in the stream, and is assigned when a record is put into the stream. Each - * stream has one or more shards.

                            + *

                            Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is + * returned to the requester.

                            + *

                            A shard iterator specifies the shard position from which to start reading data records + * sequentially. The position is specified using the sequence number of a data record in a + * shard. A sequence number is the identifier associated with every record ingested in the + * stream, and is assigned when a record is put into the stream. Each stream has one or + * more shards.

                            *

                            You must specify the shard iterator type. For example, you can set the * ShardIteratorType parameter to read exactly from the position denoted * by a specific sequence number by using the AT_SEQUENCE_NUMBER shard diff --git a/clients/client-kinesis/src/commands/IncreaseStreamRetentionPeriodCommand.ts b/clients/client-kinesis/src/commands/IncreaseStreamRetentionPeriodCommand.ts index 244843ab0649..93b0a0e80ab8 100644 --- a/clients/client-kinesis/src/commands/IncreaseStreamRetentionPeriodCommand.ts +++ b/clients/client-kinesis/src/commands/IncreaseStreamRetentionPeriodCommand.ts @@ -22,9 +22,9 @@ export interface IncreaseStreamRetentionPeriodCommandInput extends IncreaseStrea export interface IncreaseStreamRetentionPeriodCommandOutput extends __MetadataBearer {} /** - *

                            Increases the Kinesis data stream's retention period, which is the length of time - * data records are accessible after they are added to the stream. The maximum value of a - * stream's retention period is 168 hours (7 days).

                            + *

                            Increases the Kinesis data stream's retention period, which is the length of time data + * records are accessible after they are added to the stream. The maximum value of a + * stream's retention period is 8760 hours (365 days).
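A retention-increase sketch with placeholder values; the new value must exceed the current retention, up to the 8760-hour maximum noted above:

```ts
import { IncreaseStreamRetentionPeriodCommand, KinesisClient } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function keepDataForAWeek(): Promise<void> {
  await client.send(
    new IncreaseStreamRetentionPeriodCommand({
      StreamName: "example-stream", // hypothetical
      RetentionPeriodHours: 168,    // 7 days
    })
  );
}
```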

                            *

                            If you choose a longer stream retention period, this operation increases the time * period during which records that have not yet expired are accessible. However, it does * not make previous, expired data (older than the stream's previous retention period) diff --git a/clients/client-kinesis/src/commands/ListShardsCommand.ts b/clients/client-kinesis/src/commands/ListShardsCommand.ts index f0d99eb32f2f..cf29b0e1083e 100644 --- a/clients/client-kinesis/src/commands/ListShardsCommand.ts +++ b/clients/client-kinesis/src/commands/ListShardsCommand.ts @@ -22,8 +22,10 @@ export interface ListShardsCommandInput extends ListShardsInput {} export interface ListShardsCommandOutput extends ListShardsOutput, __MetadataBearer {} /** - *

                            Lists the shards in a stream and provides information about each shard. This - * operation has a limit of 100 transactions per second per data stream.

                            + *

                            Lists the shards in a stream and provides information about each shard. This operation + * has a limit of 1000 transactions per second per data stream.

                            + *

                            This action does not list expired shards. For information about expired shards, see + * Data Routing, Data Persistence, and Shard State after a Reshard.
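A pagination sketch for listing every shard; the stream name is a placeholder, and the token handling follows the constraint that NextToken replaces StreamName on subsequent pages:

```ts
import { KinesisClient, ListShardsCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder

export async function listAllShards(): Promise<string[]> {
  const shardIds: string[] = [];
  let NextToken: string | undefined;
  do {
    const out = await client.send(
      NextToken
        ? new ListShardsCommand({ NextToken })
        : new ListShardsCommand({ StreamName: "example-stream" }) // hypothetical
    );
    for (const shard of out.Shards ?? []) shardIds.push(shard.ShardId!);
    NextToken = out.NextToken;
  } while (NextToken);
  return shardIds;
}
```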

                            * *

                            This API is a new operation that is used by the Amazon Kinesis Client Library * (KCL). If you have a fine-grained IAM policy that only allows specific operations, diff --git a/clients/client-kinesis/src/commands/ListStreamsCommand.ts b/clients/client-kinesis/src/commands/ListStreamsCommand.ts index 1f1cb02463e0..7f0458e6e0b8 100644 --- a/clients/client-kinesis/src/commands/ListStreamsCommand.ts +++ b/clients/client-kinesis/src/commands/ListStreamsCommand.ts @@ -27,7 +27,7 @@ export interface ListStreamsCommandOutput extends ListStreamsOutput, __MetadataB * ListStreams. You can limit the number of returned streams using the * Limit parameter. If you do not specify a value for the * Limit parameter, Kinesis Data Streams uses the default limit, which is - * currently 10.

                            + * currently 100.

                            *

                            You can detect if there are more streams available to list by using the * HasMoreStreams flag from the returned output. If there are more streams * available, you can request more streams by using the name of the last stream returned by diff --git a/clients/client-kinesis/src/commands/MergeShardsCommand.ts b/clients/client-kinesis/src/commands/MergeShardsCommand.ts index 371f2476d66b..5d4fa5fa5adf 100644 --- a/clients/client-kinesis/src/commands/MergeShardsCommand.ts +++ b/clients/client-kinesis/src/commands/MergeShardsCommand.ts @@ -42,7 +42,7 @@ export interface MergeShardsCommandOutput extends __MetadataBearer {} * UPDATING, or DELETING state, MergeShards * returns a ResourceInUseException. If the specified stream does not exist, * MergeShards returns a ResourceNotFoundException.

- * You can use DescribeStream to check the state of the stream,
+ * You can use DescribeStreamSummary to check the state of the stream,
 * which is returned in StreamStatus.

                            *

                            * MergeShards is an asynchronous operation. Upon receiving a @@ -51,13 +51,13 @@ export interface MergeShardsCommandOutput extends __MetadataBearer {} * operation is completed, Kinesis Data Streams sets the StreamStatus to * ACTIVE. Read and write operations continue to work while the stream is * in the UPDATING state.

- * You use DescribeStream to determine the shard IDs that are
- * specified in the MergeShards request.
+ * You use DescribeStreamSummary and the ListShards
+ * APIs to determine the shard IDs that are specified in the MergeShards
+ * request.

                            *

                            If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards, * or SplitShard, you receive a LimitExceededException.

                            *

                            - * MergeShards has a limit of five transactions per second per - * account.

                            + * MergeShards has a limit of five transactions per second per account.

                            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/PutRecordCommand.ts b/clients/client-kinesis/src/commands/PutRecordCommand.ts index b38ffe549002..1ba88149f28f 100644 --- a/clients/client-kinesis/src/commands/PutRecordCommand.ts +++ b/clients/client-kinesis/src/commands/PutRecordCommand.ts @@ -31,18 +31,18 @@ export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBeare * Kinesis Data Streams segregates the data records that belong to a stream into multiple * shards, using the partition key associated with each data record to determine the shard * to which a given data record belongs.

                            - *

                            Partition keys are Unicode strings, with a maximum length limit of 256 characters - * for each key. An MD5 hash function is used to map partition keys to 128-bit integer - * values and to map associated data records to shards using the hash key ranges of the - * shards. You can override hashing the partition key to determine the shard by explicitly + *

                            Partition keys are Unicode strings, with a maximum length limit of 256 characters for + * each key. An MD5 hash function is used to map partition keys to 128-bit integer values + * and to map associated data records to shards using the hash key ranges of the shards. + * You can override hashing the partition key to determine the shard by explicitly * specifying a hash value using the ExplicitHashKey parameter. For more * information, see Adding Data to a Stream in the Amazon Kinesis Data Streams * Developer Guide.

                            *

                            * PutRecord returns the shard ID of where the data record was placed and the * sequence number that was assigned to the data record.

                            - *

                            Sequence numbers increase over time and are specific to a shard within a stream, - * not across all shards within a stream. To guarantee strictly increasing ordering, write + *

                            Sequence numbers increase over time and are specific to a shard within a stream, not + * across all shards within a stream. To guarantee strictly increasing ordering, write * serially to a shard and use the SequenceNumberForOrdering parameter. For * more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams * Developer Guide.

                            @@ -53,8 +53,8 @@ export interface PutRecordCommandOutput extends PutRecordOutput, __MetadataBeare *

                            If a PutRecord request cannot be processed because of insufficient * provisioned throughput on the shard involved in the request, PutRecord * throws ProvisionedThroughputExceededException.

                            - *

                            By default, data records are accessible for 24 hours from the time that they are - * added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

                            + *

                            By default, data records are accessible for 24 hours from the time that they are added + * to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

                            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/PutRecordsCommand.ts b/clients/client-kinesis/src/commands/PutRecordsCommand.ts index ac98c1a3d7cd..325ad5c58b70 100644 --- a/clients/client-kinesis/src/commands/PutRecordsCommand.ts +++ b/clients/client-kinesis/src/commands/PutRecordsCommand.ts @@ -25,8 +25,8 @@ export interface PutRecordsCommandOutput extends PutRecordsOutput, __MetadataBea *

                            Writes multiple data records into a Kinesis data stream in a single call (also * referred to as a PutRecords request). Use this operation to send data into * the stream for data ingestion and processing.

                            - *

                            Each PutRecords request can support up to 500 records. Each record in - * the request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request, + *

                            Each PutRecords request can support up to 500 records. Each record in the + * request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request, * including partition keys. Each shard can support writes up to 1,000 records per second, * up to a maximum data write total of 1 MiB per second.

                            *

                            You must specify the name of the stream that captures, stores, and transports the @@ -51,9 +51,9 @@ export interface PutRecordsCommandOutput extends PutRecordsOutput, __MetadataBea * record in the request array using natural ordering, from the top to the bottom of the * request and response. The response Records array always includes the same * number of records as the request array.

                            - *

                            The response Records array includes both successfully and - * unsuccessfully processed records. Kinesis Data Streams attempts to process all records - * in each PutRecords request. A single record failure does not stop the + *

                            The response Records array includes both successfully and unsuccessfully + * processed records. Kinesis Data Streams attempts to process all records in each + * PutRecords request. A single record failure does not stop the * processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering * of records. If you need to read records in the same order they are written to the * stream, use PutRecord instead of PutRecords, and write to @@ -76,8 +76,8 @@ export interface PutRecordsCommandOutput extends PutRecordsOutput, __MetadataBea *

                            After you write a record to a stream, you cannot modify that record or its order * within the stream.

                            *
                            - *

                            By default, data records are accessible for 24 hours from the time that they are - * added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

                            + *

                            By default, data records are accessible for 24 hours from the time that they are added + * to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period.

                            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/SplitShardCommand.ts b/clients/client-kinesis/src/commands/SplitShardCommand.ts index 5a419384f222..8927812aa024 100644 --- a/clients/client-kinesis/src/commands/SplitShardCommand.ts +++ b/clients/client-kinesis/src/commands/SplitShardCommand.ts @@ -32,38 +32,36 @@ export interface SplitShardCommandOutput extends __MetadataBearer {} * SplitShard to increase stream capacity, so that more Kinesis Data * Streams applications can simultaneously read data from the stream for real-time * processing.

                            - *

                            You must specify the shard to be split and the new hash key, which is the position - * in the shard where the shard gets split in two. In many cases, the new hash key might be + *

                            You must specify the shard to be split and the new hash key, which is the position in + * the shard where the shard gets split in two. In many cases, the new hash key might be * the average of the beginning and ending hash key, but it can be any hash key value in * the range being mapped into the shard. For more information, see Split a * Shard in the Amazon Kinesis Data Streams Developer * Guide.

                            - *

                            You can use DescribeStream to determine the shard ID and hash key - * values for the ShardToSplit and NewStartingHashKey parameters - * that are specified in the SplitShard request.

                            + *

                            You can use DescribeStreamSummary and the ListShards APIs to determine the shard ID and hash key values for the ShardToSplit + * and NewStartingHashKey parameters that are specified in the + * SplitShard request.

                            *

                            * SplitShard is an asynchronous operation. Upon receiving a * SplitShard request, Kinesis Data Streams immediately returns a response * and sets the stream status to UPDATING. After the operation is completed, * Kinesis Data Streams sets the stream status to ACTIVE. Read and write * operations continue to work while the stream is in the UPDATING state.

                            - *

                            You can use DescribeStream to check the status of the stream, which is - * returned in StreamStatus. If the stream is in the ACTIVE - * state, you can call SplitShard. If a stream is in CREATING or - * UPDATING or DELETING states, DescribeStream - * returns a ResourceInUseException.

                            - *

                            If the specified stream does not exist, DescribeStream returns a - * ResourceNotFoundException. If you try to create more shards than are - * authorized for your account, you receive a LimitExceededException.

                            - *

                            For the default shard limit for an AWS account, see Kinesis Data Streams - * Limits in the Amazon Kinesis Data Streams Developer - * Guide. To increase this limit, contact AWS - * Support.

                            + *

                            You can use DescribeStreamSummary to check the status of the stream, + * which is returned in StreamStatus. If the stream is in the + * ACTIVE state, you can call SplitShard. + *

                            + *

                            If the specified stream does not exist, DescribeStreamSummary + * returns a ResourceNotFoundException. If you try to create more shards than + * are authorized for your account, you receive a LimitExceededException.

                            + *

                            For the default shard limit for an Amazon Web Services account, see Kinesis + * Data Streams Limits in the Amazon Kinesis Data Streams Developer + * Guide. To increase this limit, contact Amazon Web Services + * Support.

                            *

                            If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a * LimitExceededException.

                            *

                            - * SplitShard has a limit of five transactions per second per - * account.

                            + * SplitShard has a limit of five transactions per second per account.

                            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/StartStreamEncryptionCommand.ts b/clients/client-kinesis/src/commands/StartStreamEncryptionCommand.ts index 240dfc297ecd..f6be0a4db2b9 100644 --- a/clients/client-kinesis/src/commands/StartStreamEncryptionCommand.ts +++ b/clients/client-kinesis/src/commands/StartStreamEncryptionCommand.ts @@ -22,22 +22,22 @@ export interface StartStreamEncryptionCommandInput extends StartStreamEncryption export interface StartStreamEncryptionCommandOutput extends __MetadataBearer {} /** - *

                            Enables or updates server-side encryption using an AWS KMS key for a specified - * stream.

                            - *

                            Starting encryption is an asynchronous operation. Upon receiving the request, - * Kinesis Data Streams returns immediately and sets the status of the stream to + *

                            Enables or updates server-side encryption using an Amazon Web Services KMS key for a + * specified stream.

                            + *

                            Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis + * Data Streams returns immediately and sets the status of the stream to * UPDATING. After the update is complete, Kinesis Data Streams sets the * status of the stream back to ACTIVE. Updating or applying encryption * normally takes a few seconds to complete, but it can take minutes. You can continue to * read and write data to your stream while its status is UPDATING. Once the * status of the stream is ACTIVE, encryption begins for records written to * the stream.

                            - *

                            API Limits: You can successfully apply a new AWS KMS key for server-side encryption - * 25 times in a rolling 24-hour period.

                            - *

                            Note: It can take up to 5 seconds after the stream is in an ACTIVE - * status before all records written to the stream are encrypted. After you enable - * encryption, you can verify that encryption is applied by inspecting the API response - * from PutRecord or PutRecords.

                            + *

                            API Limits: You can successfully apply a new Amazon Web Services KMS key for + * server-side encryption 25 times in a rolling 24-hour period.

                            + *

                            Note: It can take up to 5 seconds after the stream is in an ACTIVE status + * before all records written to the stream are encrypted. After you enable encryption, you + * can verify that encryption is applied by inspecting the API response from + * PutRecord or PutRecords.
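A hedged sketch of starting server-side encryption with the v3 client; the region, stream name, and key identifier below are placeholders.

```ts
import { KinesisClient, StartStreamEncryptionCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// Apply server-side encryption with a KMS key (alias shown here is a placeholder;
// a key ARN, key ID, or custom alias also works).
await client.send(
  new StartStreamEncryptionCommand({
    StreamName: "example-stream",
    EncryptionType: "KMS",
    KeyId: "alias/aws/kinesis",
  })
);
```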

                            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/StopStreamEncryptionCommand.ts b/clients/client-kinesis/src/commands/StopStreamEncryptionCommand.ts index 26e25b2690ae..0d0435575fc0 100644 --- a/clients/client-kinesis/src/commands/StopStreamEncryptionCommand.ts +++ b/clients/client-kinesis/src/commands/StopStreamEncryptionCommand.ts @@ -23,21 +23,20 @@ export interface StopStreamEncryptionCommandOutput extends __MetadataBearer {} /** *

                            Disables server-side encryption for a specified stream.

                            - *

                            Stopping encryption is an asynchronous operation. Upon receiving the request, - * Kinesis Data Streams returns immediately and sets the status of the stream to + *

                            Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis + * Data Streams returns immediately and sets the status of the stream to * UPDATING. After the update is complete, Kinesis Data Streams sets the * status of the stream back to ACTIVE. Stopping encryption normally takes a * few seconds to complete, but it can take minutes. You can continue to read and write * data to your stream while its status is UPDATING. Once the status of the * stream is ACTIVE, records written to the stream are no longer encrypted by * Kinesis Data Streams.

                            - *

                            API Limits: You can successfully disable server-side encryption 25 times in a - * rolling 24-hour period.

                            - *

                            Note: It can take up to 5 seconds after the stream is in an ACTIVE - * status before all records written to the stream are no longer subject to encryption. - * After you disabled encryption, you can verify that encryption is not applied by - * inspecting the API response from PutRecord or - * PutRecords.

                            + *

                            API Limits: You can successfully disable server-side encryption 25 times in a rolling + * 24-hour period.

                            + *

                            Note: It can take up to 5 seconds after the stream is in an ACTIVE status + * before all records written to the stream are no longer subject to encryption. After you + * disabled encryption, you can verify that encryption is not applied by inspecting the API + * response from PutRecord or PutRecords.
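Stopping encryption is the mirror-image call; a sketch follows, with placeholder values that must match what the stream is currently encrypted with.

```ts
import { KinesisClient, StopStreamEncryptionCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// EncryptionType and KeyId must match the stream's current encryption settings.
await client.send(
  new StopStreamEncryptionCommand({
    StreamName: "example-stream",
    EncryptionType: "KMS",
    KeyId: "alias/aws/kinesis", // placeholder key identifier
  })
);
```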

                            * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/SubscribeToShardCommand.ts b/clients/client-kinesis/src/commands/SubscribeToShardCommand.ts index 4b7cd5fe21f7..4cbcf8320bc1 100644 --- a/clients/client-kinesis/src/commands/SubscribeToShardCommand.ts +++ b/clients/client-kinesis/src/commands/SubscribeToShardCommand.ts @@ -41,8 +41,9 @@ export interface SubscribeToShardCommandOutput extends SubscribeToShardOutput, _ *

                            If you call SubscribeToShard again with the same ConsumerARN * and ShardId within 5 seconds of a successful call, you'll get a * ResourceInUseException. If you call SubscribeToShard 5 - * seconds or more after a successful call, the first connection will expire and the second - * call will take over the subscription.

                            + * seconds or more after a successful call, the second call takes over the subscription and + * the previous connection expires or fails with a + * ResourceInUseException.

                            *

For an example of how to use this operation, see Enhanced Fan-Out * Using the Kinesis Data Streams API.
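A sketch of consuming the enhanced fan-out subscription with the v3 client, assuming the command output exposes an async-iterable EventStream; the consumer ARN and shard ID are placeholders.

```ts
import { KinesisClient, SubscribeToShardCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

const { EventStream } = await client.send(
  new SubscribeToShardCommand({
    ConsumerARN: "arn:aws:kinesis:us-west-2:123456789012:stream/example-stream/consumer/example-consumer:1", // placeholder
    ShardId: "shardId-000000000000",
    StartingPosition: { Type: "LATEST" },
  })
);

// The subscription delivers events for up to 5 minutes; iterate and resubscribe as needed.
for await (const event of EventStream ?? []) {
  if (event.SubscribeToShardEvent?.Records) {
    console.log("received", event.SubscribeToShardEvent.Records.length, "records");
  }
}
```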

                            * @example diff --git a/clients/client-kinesis/src/commands/UpdateShardCountCommand.ts b/clients/client-kinesis/src/commands/UpdateShardCountCommand.ts index f126b5fa6e50..eda7586e6e55 100644 --- a/clients/client-kinesis/src/commands/UpdateShardCountCommand.ts +++ b/clients/client-kinesis/src/commands/UpdateShardCountCommand.ts @@ -34,8 +34,8 @@ export interface UpdateShardCountCommandOutput extends UpdateShardCountOutput, _ * individual shards. This can cause short-lived shards to be created, in addition to the * final shards. These short-lived shards count towards your total shard limit for your * account in the Region.

                            - *

                            When using this operation, we recommend that you specify a target shard count that - * is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your + *

                            When using this operation, we recommend that you specify a target shard count that is + * a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your * shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling * action might take longer to complete.

                            *

                            This operation has the following default limits. By default, you cannot do the @@ -45,27 +45,26 @@ export interface UpdateShardCountCommandOutput extends UpdateShardCountOutput, _ *

                            Scale more than ten times per rolling 24-hour period per stream

                            *
                          • *
                          • - *

                            Scale up to more than double your current shard count for a - * stream

                            + *

                            Scale up to more than double your current shard count for a stream

                            *
                          • *
                          • *

                            Scale down below half your current shard count for a stream

                            *
                          • *
                          • - *

                            Scale up to more than 500 shards in a stream

                            + *

                            Scale up to more than 10000 shards in a stream

                            *
                          • *
                          • - *

                            Scale a stream with more than 500 shards down unless the result is less - * than 500 shards

                            + *

                            Scale a stream with more than 10000 shards down unless the result is less than + * 10000 shards

                            *
                          • *
                          • *

                            Scale up to more than the shard limit for your account

                            *
                          • *
                          - *

                          For the default limits for an AWS account, see Streams Limits in the - * Amazon Kinesis Data Streams Developer Guide. To request an - * increase in the call rate limit, the shard limit for this API, or your overall shard - * limit, use the limits form.

                          + *

                          For the default limits for an Amazon Web Services account, see Streams + * Limits in the Amazon Kinesis Data Streams Developer + * Guide. To request an increase in the call rate limit, the shard limit for + * this API, or your overall shard limit, use the limits form.
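A sketch of a uniform scaling request under the limits described above; the target count is a placeholder chosen as a multiple of 25% of the current shard count, per the recommendation.

```ts
import { KinesisClient, UpdateShardCountCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// Target a multiple of 25% of the current shard count so the resharding completes quickly.
await client.send(
  new UpdateShardCountCommand({
    StreamName: "example-stream",
    TargetShardCount: 8, // placeholder value
    ScalingType: "UNIFORM_SCALING",
  })
);
```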

                          * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-kinesis/src/commands/UpdateStreamModeCommand.ts b/clients/client-kinesis/src/commands/UpdateStreamModeCommand.ts new file mode 100644 index 000000000000..19df79aec7c3 --- /dev/null +++ b/clients/client-kinesis/src/commands/UpdateStreamModeCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { KinesisClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../KinesisClient"; +import { UpdateStreamModeInput } from "../models/models_0"; +import { + deserializeAws_json1_1UpdateStreamModeCommand, + serializeAws_json1_1UpdateStreamModeCommand, +} from "../protocols/Aws_json1_1"; + +export interface UpdateStreamModeCommandInput extends UpdateStreamModeInput {} +export interface UpdateStreamModeCommandOutput extends __MetadataBearer {} + +/** + *

                          Updates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you + * can choose between an on-demand capacity mode and a + * provisioned capacity mode for your data stream. + *
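A sketch of switching an existing stream to the new on-demand capacity mode with this command, assuming the input is addressed by stream ARN as in the generated model; the ARN is a placeholder.

```ts
import { KinesisClient, UpdateStreamModeCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// Switch an existing stream to the on-demand capacity mode.
await client.send(
  new UpdateStreamModeCommand({
    StreamARN: "arn:aws:kinesis:us-west-2:123456789012:stream/example-stream", // placeholder ARN
    StreamModeDetails: { StreamMode: "ON_DEMAND" },
  })
);
```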

                          + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { KinesisClient, UpdateStreamModeCommand } from "@aws-sdk/client-kinesis"; // ES Modules import + * // const { KinesisClient, UpdateStreamModeCommand } = require("@aws-sdk/client-kinesis"); // CommonJS import + * const client = new KinesisClient(config); + * const command = new UpdateStreamModeCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateStreamModeCommandInput} for command's `input` shape. + * @see {@link UpdateStreamModeCommandOutput} for command's `response` shape. + * @see {@link KinesisClientResolvedConfig | config} for KinesisClient's `config` shape. + * + */ +export class UpdateStreamModeCommand extends $Command< + UpdateStreamModeCommandInput, + UpdateStreamModeCommandOutput, + KinesisClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateStreamModeCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: KinesisClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "KinesisClient"; + const commandName = "UpdateStreamModeCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateStreamModeInput.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateStreamModeCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_json1_1UpdateStreamModeCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_json1_1UpdateStreamModeCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-kinesis/src/commands/index.ts b/clients/client-kinesis/src/commands/index.ts index eeee5c6e4983..f3e3cfcdcad4 100644 --- a/clients/client-kinesis/src/commands/index.ts +++ b/clients/client-kinesis/src/commands/index.ts @@ -26,3 +26,4 @@ export * from "./StartStreamEncryptionCommand"; export * from "./StopStreamEncryptionCommand"; export * from "./SubscribeToShardCommand"; export * from "./UpdateShardCountCommand"; +export * from "./UpdateStreamModeCommand"; diff --git a/clients/client-kinesis/src/models/models_0.ts b/clients/client-kinesis/src/models/models_0.ts index 658980958369..657823fe4e9b 100644 --- a/clients/client-kinesis/src/models/models_0.ts +++ b/clients/client-kinesis/src/models/models_0.ts @@ -47,8 +47,8 @@ export namespace InvalidArgumentException { } /** - *

                          The requested resource exceeds the maximum number allowed, or the number of - * concurrent stream requests exceeds the maximum number allowed.

                          + *

                          The requested resource exceeds the maximum number allowed, or the number of concurrent + * stream requests exceeds the maximum number allowed.

                          */ export interface LimitExceededException extends __SmithyException, $MetadataBearer { name: "LimitExceededException"; @@ -137,9 +137,21 @@ export namespace HashKeyRange { }); } +/** + *

                          Output parameter of the GetRecords API. The existing child shard of the current + * shard.

                          + */ export interface ChildShard { + /** + *

                          The shard ID of the existing child shard of the current shard.

                          + */ ShardId: string | undefined; + + /** + *

                          The current shard that is the parent of the existing child shard.

                          + */ ParentShards: string[] | undefined; + /** *

                          The range of possible hash key values for the shard, which is a set of ordered * contiguous positive integers.

                          @@ -249,15 +261,41 @@ export namespace ConsumerDescription { }); } +export enum StreamMode { + ON_DEMAND = "ON_DEMAND", + PROVISIONED = "PROVISIONED", +} + +/** + *

                          Specifies the capacity mode to which you want to set your data stream. Currently, in + * Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                          + */ +export interface StreamModeDetails { + /** + *

                          Specifies the capacity mode to which you want to set your data stream. Currently, in + * Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                          + */ + StreamMode: StreamMode | string | undefined; +} + +export namespace StreamModeDetails { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StreamModeDetails): any => ({ + ...obj, + }); +} + /** *

                          Represents the input for CreateStream.

                          */ export interface CreateStreamInput { /** - *

                          A name to identify the stream. The stream name is scoped to the AWS account used by - * the application that creates the stream. It is also scoped by AWS Region. That is, two - * streams in two different AWS accounts can have the same name. Two streams in the same - * AWS account but in two different Regions can also have the same name.

                          + *

                          A name to identify the stream. The stream name is scoped to the Amazon Web Services + * account used by the application that creates the stream. It is also scoped by Amazon Web Services Region. That is, two streams in two different Amazon Web Services accounts + * can have the same name. Two streams in the same Amazon Web Services account but in two + * different Regions can also have the same name.

                          */ StreamName: string | undefined; @@ -266,7 +304,15 @@ export interface CreateStreamInput { * function of the number of shards; more shards are required for greater provisioned * throughput.

                          */ - ShardCount: number | undefined; + ShardCount?: number; + + /** + *

                          Indicates the capacity mode of the data stream. Currently, in Kinesis Data Streams, + * you can choose between an on-demand capacity mode and a + * provisioned capacity mode for your data + * streams.
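A sketch of the two CreateStream variants implied by the now-optional ShardCount and the new StreamModeDetails field; stream names are placeholders.

```ts
import { KinesisClient, CreateStreamCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// On-demand stream: ShardCount is omitted because capacity is managed for you.
await client.send(
  new CreateStreamCommand({
    StreamName: "example-on-demand-stream",
    StreamModeDetails: { StreamMode: "ON_DEMAND" },
  })
);

// Provisioned stream: ShardCount is still required in this mode.
await client.send(
  new CreateStreamCommand({
    StreamName: "example-provisioned-stream",
    ShardCount: 4,
    StreamModeDetails: { StreamMode: "PROVISIONED" },
  })
);
```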

                          + */ + StreamModeDetails?: StreamModeDetails; } export namespace CreateStreamInput { @@ -332,7 +378,8 @@ export namespace DeleteStreamInput { export interface DeregisterStreamConsumerInput { /** *

                          The ARN of the Kinesis data stream that the consumer is registered with. For more - * information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

                          + * information, see Amazon Resource Names (ARNs) and Amazon Web Services Service + * Namespaces.

                          */ StreamARN?: string; @@ -381,6 +428,16 @@ export interface DescribeLimitsOutput { *

                          The number of open shards.

                          */ OpenShardCount: number | undefined; + + /** + *

                          Indicates the number of data streams with the on-demand capacity mode.

                          + */ + OnDemandStreamCount: number | undefined; + + /** + *

                          The maximum number of data streams with the on-demand capacity mode.

                          + */ + OnDemandStreamCountLimit: number | undefined; } export namespace DescribeLimitsOutput { @@ -402,13 +459,18 @@ export interface DescribeStreamInput { StreamName: string | undefined; /** - *

                          The maximum number of shards to return in a single call. The default value is 100. - * If you specify a value greater than 100, at most 100 shards are returned.

                          + *

                          The maximum number of shards to return in a single call. The default value is 100. If + * you specify a value greater than 100, at most 100 results are returned.

                          */ Limit?: number; /** *

                          The shard ID of the shard to start with.

                          + *

                          Specify this parameter to indicate that you want to describe the stream starting with + * the shard whose ID immediately follows ExclusiveStartShardId.

                          + *

                          If you don't specify this parameter, the default behavior for + * DescribeStream is to describe the stream starting with the first shard + * in the stream.
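A sketch of paginating DescribeStream manually with Limit and ExclusiveStartShardId, assuming the usual HasMoreShards flag on the returned StreamDescription; the stream name is a placeholder.

```ts
import { KinesisClient, DescribeStreamCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// Page through shards 100 at a time using ExclusiveStartShardId.
let exclusiveStartShardId: string | undefined;
do {
  const { StreamDescription } = await client.send(
    new DescribeStreamCommand({
      StreamName: "example-stream",
      Limit: 100,
      ExclusiveStartShardId: exclusiveStartShardId,
    })
  );
  const shards = StreamDescription?.Shards ?? [];
  shards.forEach((shard) => console.log(shard.ShardId));
  exclusiveStartShardId = StreamDescription?.HasMoreShards
    ? shards[shards.length - 1]?.ShardId
    : undefined;
} while (exclusiveStartShardId);
```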

                          */ ExclusiveStartShardId?: string; } @@ -444,8 +506,8 @@ export enum MetricsName { export interface EnhancedMetrics { /** *

                          List of shard-level metrics.

                          - *

                          The following are the valid shard-level metrics. The value "ALL" - * enhances every metric.

                          + *

                          The following are the valid shard-level metrics. The value "ALL" enhances + * every metric.

                          *
                            *
                          • *

                            @@ -622,6 +684,12 @@ export interface StreamDescription { */ StreamStatus: StreamStatus | string | undefined; + /** + *

                            Specifies the capacity mode to which you want to set your data stream. Currently, in + * Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                            + */ + StreamModeDetails?: StreamModeDetails; + /** *

                            The shards that comprise the stream.

                            */ @@ -650,8 +718,8 @@ export interface StreamDescription { EnhancedMonitoring: EnhancedMetrics[] | undefined; /** - *

                            The server-side encryption type used on the stream. This parameter can be one of - * the following values:

                            + *

                            The server-side encryption type used on the stream. This parameter can be one of the + * following values:

                            *
                              *
                            • *

                              @@ -660,17 +728,17 @@ export interface StreamDescription { *

                            • *

                              * KMS: Use server-side encryption on the records in the stream - * using a customer-managed AWS KMS key.

                              + * using a customer-managed Amazon Web Services KMS key.

                              *
                            • *
                            */ EncryptionType?: EncryptionType | string; /** - *

                            The GUID for the customer-managed AWS KMS key to use for encryption. This value can - * be a globally unique identifier, a fully specified ARN to either an alias or a key, or - * an alias name prefixed by "alias/".You can also use a master key owned by Kinesis Data - * Streams by specifying the alias aws/kinesis.

                            + *

                            The GUID for the customer-managed Amazon Web Services KMS key to use for encryption. + * This value can be a globally unique identifier, a fully specified ARN to either an alias + * or a key, or an alias name prefixed by "alias/".You can also use a master key owned by + * Kinesis Data Streams by specifying the alias aws/kinesis.

                            *
                              *
                            • *

                              Key ARN example: @@ -715,8 +783,8 @@ export namespace StreamDescription { */ export interface DescribeStreamOutput { /** - *

                              The current status of the stream, the stream Amazon Resource Name (ARN), an array - * of shard objects that comprise the stream, and whether there are more shards + *

                              The current status of the stream, the stream Amazon Resource Name (ARN), an array of + * shard objects that comprise the stream, and whether there are more shards * available.

                              */ StreamDescription: StreamDescription | undefined; @@ -734,7 +802,8 @@ export namespace DescribeStreamOutput { export interface DescribeStreamConsumerInput { /** *

                              The ARN of the Kinesis data stream that the consumer is registered with. For more - * information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

                              + * information, see Amazon Resource Names (ARNs) and Amazon Web Services Service + * Namespaces.

                              */ StreamARN?: string; @@ -837,6 +906,12 @@ export interface StreamDescriptionSummary { */ StreamStatus: StreamStatus | string | undefined; + /** + *

Specifies the capacity mode to which you want to set your data stream. Currently, in + * Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                              + */ + StreamModeDetails?: StreamModeDetails; + /** *

                              The current retention period, in hours.

                              */ @@ -870,10 +945,10 @@ export interface StreamDescriptionSummary { EncryptionType?: EncryptionType | string; /** - *

                              The GUID for the customer-managed AWS KMS key to use for encryption. This value can - * be a globally unique identifier, a fully specified ARN to either an alias or a key, or - * an alias name prefixed by "alias/".You can also use a master key owned by Kinesis Data - * Streams by specifying the alias aws/kinesis.

                              + *

                              The GUID for the customer-managed Amazon Web Services KMS key to use for encryption. + * This value can be a globally unique identifier, a fully specified ARN to either an alias + * or a key, or an alias name prefixed by "alias/".You can also use a master key owned by + * Kinesis Data Streams by specifying the alias aws/kinesis.

                              *
                                *
                              • *

                                Key ARN example: @@ -945,15 +1020,14 @@ export namespace DescribeStreamSummaryOutput { */ export interface DisableEnhancedMonitoringInput { /** - *

                                The name of the Kinesis data stream for which to disable enhanced - * monitoring.

                                + *

                                The name of the Kinesis data stream for which to disable enhanced monitoring.

                                */ StreamName: string | undefined; /** *

                                List of shard-level metrics to disable.

                                - *

                                The following are the valid shard-level metrics. The value "ALL" - * disables every metric.

                                + *

                                The following are the valid shard-level metrics. The value "ALL" disables + * every metric.

                                *
                                  *
                                • *

                                  @@ -1022,14 +1096,14 @@ export interface EnhancedMonitoringOutput { StreamName?: string; /** - *

                                  Represents the current state of the metrics that are in the enhanced state before - * the operation.

                                  + *

                                  Represents the current state of the metrics that are in the enhanced state before the + * operation.

                                  */ CurrentShardLevelMetrics?: (MetricsName | string)[]; /** - *

                                  Represents the list of all the metrics that would be in the enhanced state after - * the operation.

                                  + *

                                  Represents the list of all the metrics that would be in the enhanced state after the + * operation.

                                  */ DesiredShardLevelMetrics?: (MetricsName | string)[]; } @@ -1054,8 +1128,8 @@ export interface EnableEnhancedMonitoringInput { /** *

                                  List of shard-level metrics to enable.

                                  - *

                                  The following are the valid shard-level metrics. The value "ALL" - * enables every metric.

                                  + *

                                  The following are the valid shard-level metrics. The value "ALL" enables + * every metric.

                                  *
                                    *
                                  • *

                                    @@ -1182,8 +1256,8 @@ export namespace GetRecordsInput { } /** - *

                                    The unit of data of the Kinesis data stream, which is composed of a sequence - * number, a partition key, and a data blob.

                                    + *

                                    The unit of data of the Kinesis data stream, which is composed of a sequence number, a + * partition key, and a data blob.

                                    */ export interface _Record { /** @@ -1220,7 +1294,7 @@ export interface _Record { *
                                  • *

                                    * KMS: Use server-side encryption on the records in the stream - * using a customer-managed AWS KMS key.

                                    + * using a customer-managed Amazon Web Services KMS key.

                                    *
                                  • *
                                  */ @@ -1246,20 +1320,24 @@ export interface GetRecordsOutput { Records: _Record[] | undefined; /** - *

                                  The next position in the shard from which to start sequentially reading data - * records. If set to null, the shard has been closed and the requested - * iterator does not return any more data.

                                  + *

                                  The next position in the shard from which to start sequentially reading data records. + * If set to null, the shard has been closed and the requested iterator does + * not return any more data.

                                  */ NextShardIterator?: string; /** - *

                                  The number of milliseconds the GetRecords response is from the - * tip of the stream, indicating how far behind current time the consumer is. A value of - * zero indicates that record processing is caught up, and there are no new records to - * process at this moment.

                                  + *

                                  The number of milliseconds the GetRecords response is from the tip + * of the stream, indicating how far behind current time the consumer is. A value of zero + * indicates that record processing is caught up, and there are no new records to process + * at this moment.

                                  */ MillisBehindLatest?: number; + /** + *

                                  The list of the current shard's child shards, returned in the GetRecords + * API's response only when the end of the current shard is reached.

                                  + */ ChildShards?: ChildShard[]; } @@ -1317,10 +1395,10 @@ export namespace KMSDisabledException { } /** - *

                                  The request was rejected because the state of the specified resource isn't valid - * for this request. For more information, see How Key State Affects Use of a - * Customer Master Key in the AWS Key Management Service Developer - * Guide.

                                  + *

                                  The request was rejected because the state of the specified resource isn't valid for + * this request. For more information, see How Key State Affects Use of a + * Customer Master Key in the Amazon Web Services Key Management + * Service Developer Guide.

                                  */ export interface KMSInvalidStateException extends __SmithyException, $MetadataBearer { name: "KMSInvalidStateException"; @@ -1363,7 +1441,7 @@ export namespace KMSNotFoundException { } /** - *

                                  The AWS access key ID needs a subscription for the service.

                                  + *

                                  The Amazon Web Services access key ID needs a subscription for the service.

                                  */ export interface KMSOptInRequired extends __SmithyException, $MetadataBearer { name: "KMSOptInRequired"; @@ -1386,7 +1464,8 @@ export namespace KMSOptInRequired { /** *

                                  The request was denied due to request throttling. For more information about * throttling, see Limits in - * the AWS Key Management Service Developer Guide.

                                  + * the Amazon Web Services Key Management Service Developer + * Guide.

                                  */ export interface KMSThrottlingException extends __SmithyException, $MetadataBearer { name: "KMSThrottlingException"; @@ -1411,8 +1490,7 @@ export namespace KMSThrottlingException { * the available throughput. Reduce the frequency or size of your requests. For more * information, see Streams Limits in the * Amazon Kinesis Data Streams Developer Guide, and Error Retries and - * Exponential Backoff in AWS in the AWS General - * Reference.

                                  + * Exponential Backoff in Amazon Web Services in the Amazon Web Services General Reference.

                                  */ export interface ProvisionedThroughputExceededException extends __SmithyException, $MetadataBearer { name: "ProvisionedThroughputExceededException"; @@ -1478,21 +1556,21 @@ export interface GetShardIteratorInput { *
                                • *
                                • * - *

                                  TRIM_HORIZON - Start reading at the last untrimmed record in the shard in - * the system, which is the oldest data record in the shard.

                                  + *

                                  TRIM_HORIZON - Start reading at the last untrimmed record in the shard in the + * system, which is the oldest data record in the shard.

                                  *
                                • *
                                • * - *

                                  LATEST - Start reading just after the most recent record in the shard, so - * that you always read the most recent data in the shard.

                                  + *

                                  LATEST - Start reading just after the most recent record in the shard, so that + * you always read the most recent data in the shard.

                                  *
                                • *
                                */ ShardIteratorType: ShardIteratorType | string | undefined; /** - *

                                The sequence number of the data record in the shard from which to start reading. - * Used with shard iterator type AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.

                                + *

                                The sequence number of the data record in the shard from which to start reading. Used + * with shard iterator type AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.
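A sketch that ties GetShardIterator to GetRecords, including the ChildShards field added in this model file; region, stream, and shard IDs are placeholders.

```ts
import {
  KinesisClient,
  GetShardIteratorCommand,
  GetRecordsCommand,
} from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// Start from the oldest untrimmed record in the shard.
const { ShardIterator } = await client.send(
  new GetShardIteratorCommand({
    StreamName: "example-stream",
    ShardId: "shardId-000000000000",
    ShardIteratorType: "TRIM_HORIZON",
  })
);

// Pass NextShardIterator from each response into the next GetRecordsCommand to keep reading.
const { Records, ChildShards } = await client.send(
  new GetRecordsCommand({ ShardIterator, Limit: 100 })
);
console.log("fetched", Records?.length ?? 0, "records");

// ChildShards is only populated once the end of a closed shard is reached.
if (ChildShards?.length) {
  console.log("continue reading from child shards:", ChildShards.map((s) => s.ShardId));
}
```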

                                */ StartingSequenceNumber?: string; @@ -1591,9 +1669,69 @@ export enum ShardFilterType { FROM_TRIM_HORIZON = "FROM_TRIM_HORIZON", } +/** + *

                                The request parameter used to filter out the response of the ListShards + * API.

                                + */ export interface ShardFilter { + /** + *

                                The shard type specified in the ShardFilter parameter. This is a required + * property of the ShardFilter parameter.

                                + *

                                You can specify the following valid values:

                                + *
                                  + *
                                • + *

                                  + * AFTER_SHARD_ID - the response includes all the shards, starting + * with the shard whose ID immediately follows the ShardId that you + * provided.

                                  + *
                                • + *
                                • + *

                                  + * AT_TRIM_HORIZON - the response includes all the shards that were + * open at TRIM_HORIZON.

                                  + *
                                • + *
                                • + *

                                  + * FROM_TRIM_HORIZON - (default), the response includes all the + * shards within the retention period of the data stream (trim to tip).

                                  + *
                                • + *
                                • + *

                                  + * AT_LATEST - the response includes only the currently open shards + * of the data stream.

                                  + *
                                • + *
                                • + *

                                  + * AT_TIMESTAMP - the response includes all shards whose start + * timestamp is less than or equal to the given timestamp and end timestamp is + * greater than or equal to the given timestamp or still open.

                                  + *
                                • + *
                                • + *

+ * FROM_TIMESTAMP - the response includes all closed shards whose + * end timestamp is greater than or equal to the given timestamp and also all open + * shards. Corrected to TRIM_HORIZON of the data stream if + * FROM_TIMESTAMP is less than the TRIM_HORIZON + * value.

                                  + *
                                • + *
                                + */ Type: ShardFilterType | string | undefined; + + /** + *

The exclusive start shard ID specified in the ShardFilter + * parameter. This property can only be used if the AFTER_SHARD_ID shard type + * is specified.

                                + */ ShardId?: string; + + /** + *

                                The timestamps specified in the ShardFilter parameter. A timestamp is a + * Unix epoch date with precision in milliseconds. For example, + * 2016-04-04T19:58:46.480-00:00 or 1459799926.480. This property can only be used if + * FROM_TIMESTAMP or AT_TIMESTAMP shard types are + * specified.

                                + */ Timestamp?: Date; } @@ -1615,18 +1753,18 @@ export interface ListShardsInput { StreamName?: string; /** - *
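A sketch of passing the ShardFilter described above to ListShards, here using the AT_TIMESTAMP type with a placeholder stream name and timestamp.

```ts
import { KinesisClient, ListShardsCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// List only the shards that were open at a specific point in time.
const { Shards } = await client.send(
  new ListShardsCommand({
    StreamName: "example-stream",
    ShardFilter: {
      Type: "AT_TIMESTAMP",
      Timestamp: new Date("2021-11-01T00:00:00Z"), // placeholder timestamp
    },
  })
);
console.log(Shards?.map((shard) => shard.ShardId));
```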

                                When the number of shards in the data stream is greater than the default value for - * the MaxResults parameter, or if you explicitly specify a value for + *

                                When the number of shards in the data stream is greater than the default value for the + * MaxResults parameter, or if you explicitly specify a value for * MaxResults that is less than the number of shards in the data stream, * the response includes a pagination token named NextToken. You can specify * this NextToken value in a subsequent call to ListShards to * list the next set of shards.

                                - *

                                Don't specify StreamName or StreamCreationTimestamp if - * you specify NextToken because the latter unambiguously identifies the + *

                                Don't specify StreamName or StreamCreationTimestamp if you + * specify NextToken because the latter unambiguously identifies the * stream.

                                - *

                                You can optionally specify a value for the MaxResults parameter when - * you specify NextToken. If you specify a MaxResults value that - * is less than the number of shards that the operation returns if you don't specify + *

                                You can optionally specify a value for the MaxResults parameter when you + * specify NextToken. If you specify a MaxResults value that is + * less than the number of shards that the operation returns if you don't specify * MaxResults, the response will contain a new NextToken * value. You can use the new NextToken value in a subsequent call to the * ListShards operation.

                                @@ -1634,15 +1772,14 @@ export interface ListShardsInput { *

                                Tokens expire after 300 seconds. When you obtain a value for * NextToken in the response to a call to ListShards, you * have 300 seconds to use that value. If you specify an expired token in a call to - * ListShards, you get - * ExpiredNextTokenException.

                                + * ListShards, you get ExpiredNextTokenException.

                                * */ NextToken?: string; /** - *

                                Specify this parameter to indicate that you want to list the shards starting with - * the shard whose ID immediately follows ExclusiveStartShardId.

                                + *

                                Specify this parameter to indicate that you want to list the shards starting with the + * shard whose ID immediately follows ExclusiveStartShardId.

                                *

                                If you don't specify this parameter, the default behavior is for * ListShards to list the shards starting with the first one in the * stream.

                                @@ -1652,8 +1789,8 @@ export interface ListShardsInput { /** *

                                The maximum number of shards to return in a single call to ListShards. - * The minimum value you can specify for this parameter is 1, and the maximum is 10,000, - * which is also the default.

                                + * The maximum number of shards to return in a single call. The default value is 1000. If + * you specify a value greater than 1000, at most 1000 results are returned.

                                *

                                When the number of shards to be listed is greater than the value of * MaxResults, the response contains a NextToken value that * you can use in a subsequent call to ListShards to list the next set of @@ -1662,15 +1799,35 @@ export interface ListShardsInput { MaxResults?: number; /** - *

                                Specify this input parameter to distinguish data streams that have the same name. - * For example, if you create a data stream and then delete it, and you later create - * another data stream with the same name, you can use this input parameter to specify - * which of the two streams you want to list the shards for.

                                + *

                                Specify this input parameter to distinguish data streams that have the same name. For + * example, if you create a data stream and then delete it, and you later create another + * data stream with the same name, you can use this input parameter to specify which of the + * two streams you want to list the shards for.

                                *

                                You cannot specify this parameter if you specify the NextToken * parameter.

                                */ StreamCreationTimestamp?: Date; + /** + *

                                Enables you to filter out the response of the ListShards API. You can + * only specify one filter at a time.

                                + *

                                If you use the ShardFilter parameter when invoking the ListShards API, + * the Type is the required property and must be specified. If you specify the + * AT_TRIM_HORIZON, FROM_TRIM_HORIZON, or + * AT_LATEST types, you do not need to specify either the + * ShardId or the Timestamp optional properties.

                                + *

If you specify the AFTER_SHARD_ID type, you must also provide the value + * for the optional ShardId property. The ShardId property is + * identical in functionality to the ExclusiveStartShardId parameter of the + * ListShards API. When the ShardId property is specified, the + * response includes the shards starting with the shard whose ID immediately follows the + * ShardId that you provided.

                                + *

If you specify the AT_TIMESTAMP or FROM_TIMESTAMP type, + * you must also provide the value for the optional Timestamp property. If you + * specify the AT_TIMESTAMP type, then all shards that were open at the provided timestamp + * are returned. If you specify the FROM_TIMESTAMP type, then all shards starting from the + * provided timestamp to TIP are returned.

                                + */ ShardFilter?: ShardFilter; } @@ -1693,8 +1850,8 @@ export interface ListShardsOutput { Shards?: Shard[]; /** - *

                                When the number of shards in the data stream is greater than the default value for - * the MaxResults parameter, or if you explicitly specify a value for + *

                                When the number of shards in the data stream is greater than the default value for the + * MaxResults parameter, or if you explicitly specify a value for * MaxResults that is less than the number of shards in the data stream, * the response includes a pagination token named NextToken. You can specify * this NextToken value in a subsequent call to ListShards to @@ -1704,8 +1861,7 @@ export interface ListShardsOutput { *

                                Tokens expire after 300 seconds. When you obtain a value for * NextToken in the response to a call to ListShards, you * have 300 seconds to use that value. If you specify an expired token in a call to - * ListShards, you get - * ExpiredNextTokenException.

                                + * ListShards, you get ExpiredNextTokenException.

                                * */ NextToken?: string; @@ -1723,7 +1879,8 @@ export namespace ListShardsOutput { export interface ListStreamConsumersInput { /** *

                                The ARN of the Kinesis data stream for which you want to list the registered - * consumers. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

                                + * consumers. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service + * Namespaces.

                                */ StreamARN: string | undefined; @@ -1756,7 +1913,8 @@ export interface ListStreamConsumersInput { /** *

                                The maximum number of consumers that you want a single call of - * ListStreamConsumers to return.

                                + * ListStreamConsumers to return. The default value is 100. If you specify + * a value greater than 100, at most 100 results are returned.

                                */ MaxResults?: number; @@ -1819,7 +1977,8 @@ export namespace ListStreamConsumersOutput { */ export interface ListStreamsInput { /** - *

                                The maximum number of streams to list.

                                + *

                                The maximum number of streams to list. The default value is 100. If you specify a + * value greater than 100, at most 100 results are returned.

                                */ Limit?: number; @@ -1843,8 +2002,8 @@ export namespace ListStreamsInput { */ export interface ListStreamsOutput { /** - *

                                The names of the streams that are associated with the AWS account making the - * ListStreams request.

                                + *

                                The names of the streams that are associated with the Amazon Web Services account + * making the ListStreams request.

                                */ StreamNames: string[] | undefined; @@ -1873,8 +2032,8 @@ export interface ListTagsForStreamInput { StreamName: string | undefined; /** - *

                                The key to use as the starting point for the list of tags. If this parameter is - * set, ListTagsForStream gets all tags that occur after + *

                                The key to use as the starting point for the list of tags. If this parameter is set, + * ListTagsForStream gets all tags that occur after * ExclusiveStartTagKey.

                                */ ExclusiveStartTagKey?: string; @@ -1908,8 +2067,8 @@ export interface Tag { Key: string | undefined; /** - *

                                An optional string, typically used to describe or define the tag. Maximum length: - * 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % + *

                                An optional string, typically used to describe or define the tag. Maximum length: 256 + * characters. Valid characters: Unicode letters, digits, white space, _ . / = + - % * @

                                */ Value?: string; @@ -1936,8 +2095,8 @@ export interface ListTagsForStreamOutput { Tags: Tag[] | undefined; /** - *

                                If set to true, more tags are available. To request additional tags, - * set ExclusiveStartTagKey to the key of the last tag returned.

                                + *

                                If set to true, more tags are available. To request additional tags, set + * ExclusiveStartTagKey to the key of the last tag returned.

                                */ HasMoreTags: boolean | undefined; } @@ -1961,8 +2120,7 @@ export interface MergeShardsInput { StreamName: string | undefined; /** - *

                                The shard ID of the shard to combine with the adjacent shard for the - * merge.

                                + *

                                The shard ID of the shard to combine with the adjacent shard for the merge.

                                */ ShardToMerge: string | undefined; @@ -1981,6 +2139,26 @@ export namespace MergeShardsInput { }); } +/** + *

                                + * + *

                                + */ +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; + $fault: "client"; + message?: string; +} + +export namespace ValidationException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); +} + /** *

                                Represents the input for PutRecord.

                                */ @@ -2010,17 +2188,17 @@ export interface PutRecordInput { PartitionKey: string | undefined; /** - *

                                The hash value used to explicitly determine the shard the data record is assigned - * to by overriding the partition key hash.

                                + *

                                The hash value used to explicitly determine the shard the data record is assigned to + * by overriding the partition key hash.

                                */ ExplicitHashKey?: string; /** - *

                                Guarantees strictly increasing sequence numbers, for puts from the same client and - * to the same partition key. Usage: set the SequenceNumberForOrdering of - * record n to the sequence number of record n-1 - * (as returned in the result when putting record n-1). If this - * parameter is not set, records are coarsely ordered based on arrival time.

                                + *

                                Guarantees strictly increasing sequence numbers, for puts from the same client and to + * the same partition key. Usage: set the SequenceNumberForOrdering of record + * n to the sequence number of record n-1 (as + * returned in the result when putting record n-1). If this parameter + * is not set, records are coarsely ordered based on arrival time.
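A sketch of chaining two puts with SequenceNumberForOrdering as described above; the region, stream name, partition key, and payloads are placeholders.

```ts
import { KinesisClient, PutRecordCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// First put for this partition key; keep its sequence number for strict ordering.
const first = await client.send(
  new PutRecordCommand({
    StreamName: "example-stream",
    PartitionKey: "device-42", // placeholder key
    Data: new TextEncoder().encode(JSON.stringify({ event: "start" })),
  })
);

// Chain the next put to the previous record's sequence number.
await client.send(
  new PutRecordCommand({
    StreamName: "example-stream",
    PartitionKey: "device-42",
    Data: new TextEncoder().encode(JSON.stringify({ event: "stop" })),
    SequenceNumberForOrdering: first.SequenceNumber,
  })
);
```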

                                */ SequenceNumberForOrdering?: string; } @@ -2044,15 +2222,15 @@ export interface PutRecordOutput { ShardId: string | undefined; /** - *

                                The sequence number identifier that was assigned to the put data record. The - * sequence number for the record is unique across all records in the stream. A sequence - * number is the identifier associated with every record put into the stream.

                                + *

                                The sequence number identifier that was assigned to the put data record. The sequence + * number for the record is unique across all records in the stream. A sequence number is + * the identifier associated with every record put into the stream.

                                */ SequenceNumber: string | undefined; /** - *

                                The encryption type to use on the record. This parameter can be one of the - * following values:

                                + *

                                The encryption type to use on the record. This parameter can be one of the following + * values:

                                *
                                  *
                                • *

                                  @@ -2061,7 +2239,7 @@ export interface PutRecordOutput { *

                                • *

                                  * KMS: Use server-side encryption on the records in the stream - * using a customer-managed AWS KMS key.

                                  + * using a customer-managed Amazon Web Services KMS key.

                                  *
                                • *
                                */ @@ -2090,8 +2268,8 @@ export interface PutRecordsRequestEntry { Data: Uint8Array | undefined; /** - *

                                The hash value used to determine explicitly the shard that the data record is - * assigned to by overriding the partition key hash.

                                + *

                                The hash value used to determine explicitly the shard that the data record is assigned + * to by overriding the partition key hash.

                                */ ExplicitHashKey?: string; @@ -2141,11 +2319,10 @@ export namespace PutRecordsInput { } /** - *

                                Represents the result of an individual record from a PutRecords - * request. A record that is successfully added to a stream includes - * SequenceNumber and ShardId in the result. A record that - * fails to be added to the stream includes ErrorCode and - * ErrorMessage in the result.

                                + *

                                Represents the result of an individual record from a PutRecords request. + * A record that is successfully added to a stream includes SequenceNumber and + * ShardId in the result. A record that fails to be added to the stream + * includes ErrorCode and ErrorMessage in the result.

                                */ export interface PutRecordsResultEntry { /** @@ -2159,15 +2336,15 @@ export interface PutRecordsResultEntry { ShardId?: string; /** - *

                                The error code for an individual record result. ErrorCodes can be - * either ProvisionedThroughputExceededException or - * InternalFailure.

                                + *

                                The error code for an individual record result. ErrorCodes can be either + * ProvisionedThroughputExceededException or + * InternalFailure.

                                */ ErrorCode?: string; /** - *

                                The error message for an individual record result. An ErrorCode value - * of ProvisionedThroughputExceededException has an error message that + *

                                The error message for an individual record result. An ErrorCode value of + * ProvisionedThroughputExceededException has an error message that * includes the account ID, stream name, and shard ID. An ErrorCode value of * InternalFailure has the error message "Internal Service * Failure".

                                @@ -2196,11 +2373,10 @@ export interface PutRecordsOutput { FailedRecordCount?: number; /** - *

                                An array of successfully and unsuccessfully processed record results, correlated - * with the request by natural ordering. A record that is successfully added to a stream - * includes SequenceNumber and ShardId in the result. A record - * that fails to be added to a stream includes ErrorCode and - * ErrorMessage in the result.

                                + *

                                An array of successfully and unsuccessfully processed record results. A record that is + * successfully added to a stream includes SequenceNumber and + * ShardId in the result. A record that fails to be added to a stream + * includes ErrorCode and ErrorMessage in the result.
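A sketch that uses the positional correlation between request entries and result entries to retry only the failed records; names and payloads are placeholders.

```ts
import { KinesisClient, PutRecordsCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

const entries = ["a", "b", "c"].map((id) => ({
  PartitionKey: id,
  Data: new TextEncoder().encode(JSON.stringify({ id })),
}));

const { FailedRecordCount, Records } = await client.send(
  new PutRecordsCommand({ StreamName: "example-stream", Records: entries })
);

// Result entries line up with the request entries, so collect the failed
// ones by index and retry only those.
if (FailedRecordCount) {
  const retries = entries.filter((_, i) => Records?.[i]?.ErrorCode);
  console.log("retrying", retries.length, "records");
}
```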

                                */ Records: PutRecordsResultEntry[] | undefined; @@ -2215,7 +2391,7 @@ export interface PutRecordsOutput { *
                              • *

                                * KMS: Use server-side encryption on the records using a - * customer-managed AWS KMS key.

                                + * customer-managed Amazon Web Services KMS key.

                                *
                              • *
                              */ @@ -2234,7 +2410,8 @@ export namespace PutRecordsOutput { export interface RegisterStreamConsumerInput { /** *

  * The ARN of the Kinesis data stream that you want to register the consumer with. For
- *   more info, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
+ *   more info, see Amazon Resource Names (ARNs) and Amazon Web Services Service
+ *   Namespaces.

                              */ StreamARN: string | undefined; @@ -2310,8 +2487,8 @@ export interface SplitShardInput { ShardToSplit: string | undefined; /** - *

- * A hash key value for the starting hash key of one of the child shards created by
- *   the split. The hash key range for a given shard constitutes a set of ordered contiguous
+ * A hash key value for the starting hash key of one of the child shards created by the
+ *   split. The hash key range for a given shard constitutes a set of ordered contiguous
  *   positive integers. The value for NewStartingHashKey must be in the range of
  *   hash keys being mapped into the shard. The NewStartingHashKey hash key
  *   value and all higher hash key values in hash key range are distributed to one of the
@@ -2342,11 +2519,11 @@ export interface StartStreamEncryptionInput {
   EncryptionType: EncryptionType | string | undefined;

   /**

- * The GUID for the customer-managed AWS KMS key to use for encryption. This value can
- *   be a globally unique identifier, a fully specified Amazon Resource Name (ARN) to either
- *   an alias or a key, or an alias name prefixed by "alias/".You can also use a master key
- *   owned by Kinesis Data Streams by specifying the alias
- *   aws/kinesis.
+ * The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.
+ *   This value can be a globally unique identifier, a fully specified Amazon Resource Name
+ *   (ARN) to either an alias or a key, or an alias name prefixed by "alias/". You can also
+ *   use a master key owned by Kinesis Data Streams by specifying the alias
+ *   aws/kinesis.

                              *
                                *
                              • *

                                Key ARN example: @@ -2398,11 +2575,11 @@ export interface StopStreamEncryptionInput { EncryptionType: EncryptionType | string | undefined; /** - *

- * The GUID for the customer-managed AWS KMS key to use for encryption. This value can
- *   be a globally unique identifier, a fully specified Amazon Resource Name (ARN) to either
- *   an alias or a key, or an alias name prefixed by "alias/".You can also use a master key
- *   owned by Kinesis Data Streams by specifying the alias
- *   aws/kinesis.

                                + *

+ * The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.
+ *   This value can be a globally unique identifier, a fully specified Amazon Resource Name
+ *   (ARN) to either an alias or a key, or an alias name prefixed by "alias/". You can also
+ *   use a master key owned by Kinesis Data Streams by specifying the alias
+ *   aws/kinesis.

                                *
                                  *
                                • *

                                  Key ARN example: @@ -2443,7 +2620,7 @@ export namespace StopStreamEncryptionInput { } /** - *

                                  + *

                                  The starting position in the data stream from which to start streaming.

                                  */ export interface StartingPosition { /** @@ -2507,7 +2684,7 @@ export interface SubscribeToShardInput { ShardId: string | undefined; /** - *

                                  + *

                                  The starting position in the data stream from which to start streaming.
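As a hedged usage sketch of the starting-position parameter described above: subscribing to a shard from LATEST with the new ChildShards signal used to detect the end of the shard. The region, consumer ARN, and shard ID are placeholders, and the async-iterator access pattern follows the SDK v3 event-stream convention.

```ts
import { KinesisClient, SubscribeToShardCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-east-1" }); // placeholder region

async function tailShard(): Promise<void> {
  const { EventStream } = await client.send(
    new SubscribeToShardCommand({
      ConsumerARN: "arn:aws:kinesis:us-east-1:123456789012:stream/my-stream/consumer/my-consumer:1", // placeholder
      ShardId: "shardId-000000000000", // placeholder shard
      StartingPosition: { Type: "LATEST" },
    })
  );
  if (!EventStream) return;
  for await (const event of EventStream) {
    // Each element of the stream wraps a SubscribeToShardEvent member.
    const records = event.SubscribeToShardEvent?.Records ?? [];
    for (const record of records) {
      console.log(record.SequenceNumber, record.Data?.length);
    }
    // ChildShards is populated only at the end of the current shard.
    if (event.SubscribeToShardEvent?.ChildShards?.length) break;
  }
}

tailShard().catch(console.error);
```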

                                  */ StartingPosition: StartingPosition | undefined; } @@ -2546,6 +2723,10 @@ export interface SubscribeToShardEvent { */ MillisBehindLatest: number | undefined; + /** + *

+ * The list of the child shards of the current shard, returned only at the end of the
+ *   current shard.

                                  + */ ChildShards?: ChildShard[]; } @@ -2650,10 +2831,10 @@ export namespace SubscribeToShardEventStream { } /** - *

- * The request was rejected because the state of the specified resource isn't valid
- *   for this request. For more information, see How Key State Affects Use of a
- *   Customer Master Key in the AWS Key Management Service Developer
- *   Guide.
+ * The request was rejected because the state of the specified resource isn't valid for
+ *   this request. For more information, see How Key State Affects Use of a
+ *   Customer Master Key in the Amazon Web Services Key Management
+ *   Service Developer Guide.

                                  */ export interface KMSInvalidStateExceptionMember { SubscribeToShardEvent?: never; @@ -2706,7 +2887,7 @@ export namespace SubscribeToShardEventStream { } /** - *

- * The AWS access key ID needs a subscription for the service.
+ * The Amazon Web Services access key ID needs a subscription for the service.

                                  */ export interface KMSOptInRequiredMember { SubscribeToShardEvent?: never; @@ -2725,7 +2906,8 @@ export namespace SubscribeToShardEventStream { /** *

  * The request was denied due to request throttling. For more information about
  *   throttling, see Limits in
- *   the AWS Key Management Service Developer Guide.
+ *   the Amazon Web Services Key Management Service Developer
+ *   Guide.

                                  */ export interface KMSThrottlingExceptionMember { SubscribeToShardEvent?: never; @@ -2861,8 +3043,8 @@ export interface UpdateShardCountInput { StreamName: string | undefined; /** - *

- * The new number of shards. This value has the following default limits. By default,
- *   you cannot do the following:
+ * The new number of shards. This value has the following default limits. By default, you
+ *   cannot do the following:

                                  *
                                    *
                                  • *

                                    Set this value to more than double your current shard count for a @@ -2872,13 +3054,13 @@ export interface UpdateShardCountInput { *

                                    Set this value below half your current shard count for a stream.

                                    *
                                  • *
                                  • - *

- *     Set this value to more than 500 shards in a stream (the default limit for
- *       shard count per stream is 500 per account per region), unless you request a
+ *     Set this value to more than 10000 shards in a stream (the default limit for
+ *       shard count per stream is 10000 per account per region), unless you request a
  *       limit increase.

                                    *
                                  • *
                                  • - *

- *     Scale a stream with more than 500 shards down unless you set this value to
- *       less than 500 shards.
+ *     Scale a stream with more than 10000 shards down unless you set this value to
+ *       less than 10000 shards.

                                    *
                                  • *
                                  */ @@ -2924,3 +3106,25 @@ export namespace UpdateShardCountOutput { ...obj, }); } + +export interface UpdateStreamModeInput { + /** + *

                                  Specifies the ARN of the data stream whose capacity mode you want to update.

                                  + */ + StreamARN: string | undefined; + + /** + *

+ * Specifies the capacity mode to which you want to set your data stream. Currently, in
+ *   Kinesis Data Streams, you can choose between an on-demand capacity mode and a
+ *   provisioned capacity mode for your data streams.
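A minimal sketch of the new UpdateStreamMode operation added in this change, switching a stream to the on-demand capacity mode described above ("PROVISIONED" is the other mode); the region and stream ARN are placeholders.

```ts
import { KinesisClient, UpdateStreamModeCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-east-1" }); // placeholder region

client
  .send(
    new UpdateStreamModeCommand({
      StreamARN: "arn:aws:kinesis:us-east-1:123456789012:stream/my-stream", // placeholder ARN
      StreamModeDetails: { StreamMode: "ON_DEMAND" },
    })
  )
  .then(() => console.log("stream mode update requested"))
  .catch(console.error);
```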

                                  + */ + StreamModeDetails: StreamModeDetails | undefined; +} + +export namespace UpdateStreamModeInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateStreamModeInput): any => ({ + ...obj, + }); +} diff --git a/clients/client-kinesis/src/protocols/Aws_json1_1.ts b/clients/client-kinesis/src/protocols/Aws_json1_1.ts index 8957800a32c2..35334cee2d37 100644 --- a/clients/client-kinesis/src/protocols/Aws_json1_1.ts +++ b/clients/client-kinesis/src/protocols/Aws_json1_1.ts @@ -82,6 +82,7 @@ import { } from "../commands/StopStreamEncryptionCommand"; import { SubscribeToShardCommandInput, SubscribeToShardCommandOutput } from "../commands/SubscribeToShardCommand"; import { UpdateShardCountCommandInput, UpdateShardCountCommandOutput } from "../commands/UpdateShardCountCommand"; +import { UpdateStreamModeCommandInput, UpdateStreamModeCommandOutput } from "../commands/UpdateStreamModeCommand"; import { _Record, AddTagsToStreamInput, @@ -152,6 +153,7 @@ import { StopStreamEncryptionInput, StreamDescription, StreamDescriptionSummary, + StreamModeDetails, SubscribeToShardEvent, SubscribeToShardEventStream, SubscribeToShardInput, @@ -159,6 +161,8 @@ import { Tag, UpdateShardCountInput, UpdateShardCountOutput, + UpdateStreamModeInput, + ValidationException, } from "../models/models_0"; export const serializeAws_json1_1AddTagsToStreamCommand = async ( @@ -525,6 +529,19 @@ export const serializeAws_json1_1UpdateShardCountCommand = async ( return buildHttpRpcRequest(context, headers, "/", undefined, body); }; +export const serializeAws_json1_1UpdateStreamModeCommand = async ( + input: UpdateStreamModeCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const headers: __HeaderBag = { + "content-type": "application/x-amz-json-1.1", + "x-amz-target": "Kinesis_20131202.UpdateStreamMode", + }; + let body: any; + body = JSON.stringify(serializeAws_json1_1UpdateStreamModeInput(input, context)); + return buildHttpRpcRequest(context, headers, "/", undefined, body); +}; + export const deserializeAws_json1_1AddTagsToStreamCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -1905,6 +1922,14 @@ const deserializeAws_json1_1MergeShardsCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ValidationException": + case "com.amazonaws.kinesis#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -2369,6 +2394,14 @@ const deserializeAws_json1_1SplitShardCommandError = async ( $metadata: deserializeMetadata(output), }; break; + case "ValidationException": + case "com.amazonaws.kinesis#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; default: const parsedBody = parsedOutput.body; errorCode = parsedBody.code || parsedBody.Code || errorCode; @@ -2683,6 +2716,89 @@ const deserializeAws_json1_1UpdateShardCountCommandError = async ( output: __HttpResponse, context: __SerdeContext ): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = 
loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InvalidArgumentException": + case "com.amazonaws.kinesis#InvalidArgumentException": + response = { + ...(await deserializeAws_json1_1InvalidArgumentExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "LimitExceededException": + case "com.amazonaws.kinesis#LimitExceededException": + response = { + ...(await deserializeAws_json1_1LimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceInUseException": + case "com.amazonaws.kinesis#ResourceInUseException": + response = { + ...(await deserializeAws_json1_1ResourceInUseExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.kinesis#ResourceNotFoundException": + response = { + ...(await deserializeAws_json1_1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.kinesis#ValidationException": + response = { + ...(await deserializeAws_json1_1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_json1_1UpdateStreamModeCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode >= 300) { + return deserializeAws_json1_1UpdateStreamModeCommandError(output, context); + } + await collectBody(output.body, context); + const response: UpdateStreamModeCommandOutput = { + $metadata: deserializeMetadata(output), + }; + return Promise.resolve(response); +}; + +const deserializeAws_json1_1UpdateStreamModeCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2935,6 +3051,21 @@ const deserializeAws_json1_1ResourceNotFoundExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1ValidationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1ValidationException(body, context); + const contents: ValidationException = { + name: "ValidationException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const serializeAws_json1_1AddTagsToStreamInput = (input: AddTagsToStreamInput, context: __SerdeContext): any => { return { ...(input.StreamName !== undefined && input.StreamName !== null && { StreamName: input.StreamName }), @@ -2945,6 +3076,10 @@ const serializeAws_json1_1AddTagsToStreamInput = (input: AddTagsToStreamInput, c const serializeAws_json1_1CreateStreamInput = (input: CreateStreamInput, 
context: __SerdeContext): any => { return { ...(input.ShardCount !== undefined && input.ShardCount !== null && { ShardCount: input.ShardCount }), + ...(input.StreamModeDetails !== undefined && + input.StreamModeDetails !== null && { + StreamModeDetails: serializeAws_json1_1StreamModeDetails(input.StreamModeDetails, context), + }), ...(input.StreamName !== undefined && input.StreamName !== null && { StreamName: input.StreamName }), }; }; @@ -3253,6 +3388,12 @@ const serializeAws_json1_1StopStreamEncryptionInput = ( }; }; +const serializeAws_json1_1StreamModeDetails = (input: StreamModeDetails, context: __SerdeContext): any => { + return { + ...(input.StreamMode !== undefined && input.StreamMode !== null && { StreamMode: input.StreamMode }), + }; +}; + const serializeAws_json1_1SubscribeToShardInput = (input: SubscribeToShardInput, context: __SerdeContext): any => { return { ...(input.ConsumerARN !== undefined && input.ConsumerARN !== null && { ConsumerARN: input.ConsumerARN }), @@ -3296,6 +3437,16 @@ const serializeAws_json1_1UpdateShardCountInput = (input: UpdateShardCountInput, }; }; +const serializeAws_json1_1UpdateStreamModeInput = (input: UpdateStreamModeInput, context: __SerdeContext): any => { + return { + ...(input.StreamARN !== undefined && input.StreamARN !== null && { StreamARN: input.StreamARN }), + ...(input.StreamModeDetails !== undefined && + input.StreamModeDetails !== null && { + StreamModeDetails: serializeAws_json1_1StreamModeDetails(input.StreamModeDetails, context), + }), + }; +}; + const deserializeAws_json1_1ChildShard = (output: any, context: __SerdeContext): ChildShard => { return { HashKeyRange: @@ -3359,6 +3510,8 @@ const deserializeAws_json1_1ConsumerList = (output: any, context: __SerdeContext const deserializeAws_json1_1DescribeLimitsOutput = (output: any, context: __SerdeContext): DescribeLimitsOutput => { return { + OnDemandStreamCount: __expectInt32(output.OnDemandStreamCount), + OnDemandStreamCountLimit: __expectInt32(output.OnDemandStreamCountLimit), OpenShardCount: __expectInt32(output.OpenShardCount), ShardLimit: __expectInt32(output.ShardLimit), } as any; @@ -3769,6 +3922,10 @@ const deserializeAws_json1_1StreamDescription = (output: any, context: __SerdeCo output.StreamCreationTimestamp !== undefined && output.StreamCreationTimestamp !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.StreamCreationTimestamp))) : undefined, + StreamModeDetails: + output.StreamModeDetails !== undefined && output.StreamModeDetails !== null + ? deserializeAws_json1_1StreamModeDetails(output.StreamModeDetails, context) + : undefined, StreamName: __expectString(output.StreamName), StreamStatus: __expectString(output.StreamStatus), } as any; @@ -3793,11 +3950,21 @@ const deserializeAws_json1_1StreamDescriptionSummary = ( output.StreamCreationTimestamp !== undefined && output.StreamCreationTimestamp !== null ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.StreamCreationTimestamp))) : undefined, + StreamModeDetails: + output.StreamModeDetails !== undefined && output.StreamModeDetails !== null + ? 
deserializeAws_json1_1StreamModeDetails(output.StreamModeDetails, context) + : undefined, StreamName: __expectString(output.StreamName), StreamStatus: __expectString(output.StreamStatus), } as any; }; +const deserializeAws_json1_1StreamModeDetails = (output: any, context: __SerdeContext): StreamModeDetails => { + return { + StreamMode: __expectString(output.StreamMode), + } as any; +}; + const deserializeAws_json1_1StreamNameList = (output: any, context: __SerdeContext): string[] => { return (output || []) .filter((e: any) => e != null) @@ -3928,6 +4095,12 @@ const deserializeAws_json1_1UpdateShardCountOutput = (output: any, context: __Se } as any; }; +const deserializeAws_json1_1ValidationException = (output: any, context: __SerdeContext): ValidationException => { + return { + message: __expectString(output.message), + } as any; +}; + const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ httpStatusCode: output.statusCode, requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], diff --git a/clients/client-lakeformation/README.md b/clients/client-lakeformation/README.md index 7d16513320c3..c4429e5c3004 100644 --- a/clients/client-lakeformation/README.md +++ b/clients/client-lakeformation/README.md @@ -7,9 +7,9 @@ AWS SDK for JavaScript LakeFormation Client for Node.js, Browser and React Native. -AWS Lake Formation +Lake Formation -

                                  Defines the public endpoint for the AWS Lake Formation service.

                                  +

                                  Defines the public endpoint for the Lake Formation service.

                                  ## Installing diff --git a/clients/client-lakeformation/src/LakeFormation.ts b/clients/client-lakeformation/src/LakeFormation.ts index 95d9cc662d5a..3a2280176360 100644 --- a/clients/client-lakeformation/src/LakeFormation.ts +++ b/clients/client-lakeformation/src/LakeFormation.ts @@ -15,8 +15,33 @@ import { BatchRevokePermissionsCommandInput, BatchRevokePermissionsCommandOutput, } from "./commands/BatchRevokePermissionsCommand"; +import { + CancelTransactionCommand, + CancelTransactionCommandInput, + CancelTransactionCommandOutput, +} from "./commands/CancelTransactionCommand"; +import { + CommitTransactionCommand, + CommitTransactionCommandInput, + CommitTransactionCommandOutput, +} from "./commands/CommitTransactionCommand"; +import { + CreateDataCellsFilterCommand, + CreateDataCellsFilterCommandInput, + CreateDataCellsFilterCommandOutput, +} from "./commands/CreateDataCellsFilterCommand"; import { CreateLFTagCommand, CreateLFTagCommandInput, CreateLFTagCommandOutput } from "./commands/CreateLFTagCommand"; +import { + DeleteDataCellsFilterCommand, + DeleteDataCellsFilterCommandInput, + DeleteDataCellsFilterCommandOutput, +} from "./commands/DeleteDataCellsFilterCommand"; import { DeleteLFTagCommand, DeleteLFTagCommandInput, DeleteLFTagCommandOutput } from "./commands/DeleteLFTagCommand"; +import { + DeleteObjectsOnCancelCommand, + DeleteObjectsOnCancelCommandInput, + DeleteObjectsOnCancelCommandOutput, +} from "./commands/DeleteObjectsOnCancelCommand"; import { DeregisterResourceCommand, DeregisterResourceCommandInput, @@ -27,6 +52,16 @@ import { DescribeResourceCommandInput, DescribeResourceCommandOutput, } from "./commands/DescribeResourceCommand"; +import { + DescribeTransactionCommand, + DescribeTransactionCommandInput, + DescribeTransactionCommandOutput, +} from "./commands/DescribeTransactionCommand"; +import { + ExtendTransactionCommand, + ExtendTransactionCommandInput, + ExtendTransactionCommandOutput, +} from "./commands/ExtendTransactionCommand"; import { GetDataLakeSettingsCommand, GetDataLakeSettingsCommandInput, @@ -38,16 +73,46 @@ import { GetEffectivePermissionsForPathCommandOutput, } from "./commands/GetEffectivePermissionsForPathCommand"; import { GetLFTagCommand, GetLFTagCommandInput, GetLFTagCommandOutput } from "./commands/GetLFTagCommand"; +import { + GetQueryStateCommand, + GetQueryStateCommandInput, + GetQueryStateCommandOutput, +} from "./commands/GetQueryStateCommand"; +import { + GetQueryStatisticsCommand, + GetQueryStatisticsCommandInput, + GetQueryStatisticsCommandOutput, +} from "./commands/GetQueryStatisticsCommand"; import { GetResourceLFTagsCommand, GetResourceLFTagsCommandInput, GetResourceLFTagsCommandOutput, } from "./commands/GetResourceLFTagsCommand"; +import { + GetTableObjectsCommand, + GetTableObjectsCommandInput, + GetTableObjectsCommandOutput, +} from "./commands/GetTableObjectsCommand"; +import { + GetWorkUnitResultsCommand, + GetWorkUnitResultsCommandInput, + GetWorkUnitResultsCommandOutput, +} from "./commands/GetWorkUnitResultsCommand"; +import { + GetWorkUnitsCommand, + GetWorkUnitsCommandInput, + GetWorkUnitsCommandOutput, +} from "./commands/GetWorkUnitsCommand"; import { GrantPermissionsCommand, GrantPermissionsCommandInput, GrantPermissionsCommandOutput, } from "./commands/GrantPermissionsCommand"; +import { + ListDataCellsFilterCommand, + ListDataCellsFilterCommandInput, + ListDataCellsFilterCommandOutput, +} from "./commands/ListDataCellsFilterCommand"; import { ListLFTagsCommand, 
ListLFTagsCommandInput, ListLFTagsCommandOutput } from "./commands/ListLFTagsCommand"; import { ListPermissionsCommand, @@ -59,6 +124,16 @@ import { ListResourcesCommandInput, ListResourcesCommandOutput, } from "./commands/ListResourcesCommand"; +import { + ListTableStorageOptimizersCommand, + ListTableStorageOptimizersCommandInput, + ListTableStorageOptimizersCommandOutput, +} from "./commands/ListTableStorageOptimizersCommand"; +import { + ListTransactionsCommand, + ListTransactionsCommandInput, + ListTransactionsCommandOutput, +} from "./commands/ListTransactionsCommand"; import { PutDataLakeSettingsCommand, PutDataLakeSettingsCommandInput, @@ -89,21 +164,41 @@ import { SearchTablesByLFTagsCommandInput, SearchTablesByLFTagsCommandOutput, } from "./commands/SearchTablesByLFTagsCommand"; +import { + StartQueryPlanningCommand, + StartQueryPlanningCommandInput, + StartQueryPlanningCommandOutput, +} from "./commands/StartQueryPlanningCommand"; +import { + StartTransactionCommand, + StartTransactionCommandInput, + StartTransactionCommandOutput, +} from "./commands/StartTransactionCommand"; import { UpdateLFTagCommand, UpdateLFTagCommandInput, UpdateLFTagCommandOutput } from "./commands/UpdateLFTagCommand"; import { UpdateResourceCommand, UpdateResourceCommandInput, UpdateResourceCommandOutput, } from "./commands/UpdateResourceCommand"; +import { + UpdateTableObjectsCommand, + UpdateTableObjectsCommandInput, + UpdateTableObjectsCommandOutput, +} from "./commands/UpdateTableObjectsCommand"; +import { + UpdateTableStorageOptimizerCommand, + UpdateTableStorageOptimizerCommandInput, + UpdateTableStorageOptimizerCommandOutput, +} from "./commands/UpdateTableStorageOptimizerCommand"; import { LakeFormationClient } from "./LakeFormationClient"; /** - * AWS Lake Formation - *

                                  Defines the public endpoint for the AWS Lake Formation service.

                                  + * Lake Formation + *

                                  Defines the public endpoint for the Lake Formation service.

                                  */ export class LakeFormation extends LakeFormationClient { /** - *

                                  Attaches one or more tags to an existing resource.

                                  + *

                                  Attaches one or more LF-tags to an existing resource.

                                  */ public addLFTagsToResource( args: AddLFTagsToResourceCommandInput, @@ -199,7 +294,103 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  Creates a tag with the specified name and values.

                                  + *

                                  Attempts to cancel the specified transaction. Returns an exception if the transaction was previously committed.

                                  + */ + public cancelTransaction( + args: CancelTransactionCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public cancelTransaction( + args: CancelTransactionCommandInput, + cb: (err: any, data?: CancelTransactionCommandOutput) => void + ): void; + public cancelTransaction( + args: CancelTransactionCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CancelTransactionCommandOutput) => void + ): void; + public cancelTransaction( + args: CancelTransactionCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CancelTransactionCommandOutput) => void), + cb?: (err: any, data?: CancelTransactionCommandOutput) => void + ): Promise | void { + const command = new CancelTransactionCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Attempts to commit the specified transaction. Returns an exception if the transaction was previously aborted. This API action is idempotent if called multiple times for the same transaction.

                                  + */ + public commitTransaction( + args: CommitTransactionCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public commitTransaction( + args: CommitTransactionCommandInput, + cb: (err: any, data?: CommitTransactionCommandOutput) => void + ): void; + public commitTransaction( + args: CommitTransactionCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CommitTransactionCommandOutput) => void + ): void; + public commitTransaction( + args: CommitTransactionCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CommitTransactionCommandOutput) => void), + cb?: (err: any, data?: CommitTransactionCommandOutput) => void + ): Promise | void { + const command = new CommitTransactionCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Creates a data cell filter to allow one to grant access to certain columns on certain rows.
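A hedged sketch of creating such a cell-level filter with the new CreateDataCellsFilterCommand. The database, table, filter name, columns, and row expression are placeholders, and the TableData/RowFilter field names are assumptions based on the Lake Formation data cells filter shape.

```ts
import { LakeFormationClient, CreateDataCellsFilterCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// Grant-able cell-level filter: two columns, only rows where country = 'DE'.
client
  .send(
    new CreateDataCellsFilterCommand({
      TableData: {
        TableCatalogId: "123456789012", // placeholder account ID
        DatabaseName: "sales_db", // placeholder
        TableName: "orders", // placeholder
        Name: "orders_de_only",
        ColumnNames: ["order_id", "amount"],
        RowFilter: { FilterExpression: "country = 'DE'" },
      },
    })
  )
  .then(() => console.log("data cells filter created"))
  .catch(console.error);
```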

                                  + */ + public createDataCellsFilter( + args: CreateDataCellsFilterCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createDataCellsFilter( + args: CreateDataCellsFilterCommandInput, + cb: (err: any, data?: CreateDataCellsFilterCommandOutput) => void + ): void; + public createDataCellsFilter( + args: CreateDataCellsFilterCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateDataCellsFilterCommandOutput) => void + ): void; + public createDataCellsFilter( + args: CreateDataCellsFilterCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateDataCellsFilterCommandOutput) => void), + cb?: (err: any, data?: CreateDataCellsFilterCommandOutput) => void + ): Promise | void { + const command = new CreateDataCellsFilterCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Creates an LF-tag with the specified name and values.
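To show how the LF-tag operations in this class fit together, here is a hedged sketch that creates an LF-tag and then attaches one of its values to a database via AddLFTagsToResourceCommand. The tag key, values, and database name are placeholders.

```ts
import {
  LakeFormationClient,
  CreateLFTagCommand,
  AddLFTagsToResourceCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function tagDatabase(): Promise<void> {
  // 1. Create the LF-tag key with its allowed values.
  await client.send(new CreateLFTagCommand({ TagKey: "environment", TagValues: ["dev", "prod"] }));

  // 2. Attach one of those values to an existing database resource.
  await client.send(
    new AddLFTagsToResourceCommand({
      Resource: { Database: { Name: "sales_db" } }, // placeholder database
      LFTags: [{ TagKey: "environment", TagValues: ["prod"] }],
    })
  );
}

tagDatabase().catch(console.error);
```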

                                  */ public createLFTag(args: CreateLFTagCommandInput, options?: __HttpHandlerOptions): Promise; public createLFTag(args: CreateLFTagCommandInput, cb: (err: any, data?: CreateLFTagCommandOutput) => void): void; @@ -225,7 +416,39 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  Deletes the specified tag key name. If the attribute key does not exist or the tag does not exist, then the operation will not do anything. If the attribute key exists, then the operation checks if any resources are tagged with this attribute key, if yes, the API throws a 400 Exception with the message "Delete not allowed" as the tag key is still attached with resources. You can consider untagging resources with this tag key.

                                  + *

                                  Deletes a data cell filter.

                                  + */ + public deleteDataCellsFilter( + args: DeleteDataCellsFilterCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteDataCellsFilter( + args: DeleteDataCellsFilterCommandInput, + cb: (err: any, data?: DeleteDataCellsFilterCommandOutput) => void + ): void; + public deleteDataCellsFilter( + args: DeleteDataCellsFilterCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteDataCellsFilterCommandOutput) => void + ): void; + public deleteDataCellsFilter( + args: DeleteDataCellsFilterCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteDataCellsFilterCommandOutput) => void), + cb?: (err: any, data?: DeleteDataCellsFilterCommandOutput) => void + ): Promise | void { + const command = new DeleteDataCellsFilterCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Deletes the specified LF-tag key name. If the attribute key does not exist or the LF-tag does not exist, then the operation will not do anything. If the attribute key exists, then the operation checks if any resources are tagged with this attribute key, if yes, the API throws a 400 Exception with the message "Delete not allowed" as the LF-tag key is still attached with resources. You can consider untagging resources with this LF-tag key.

                                  */ public deleteLFTag(args: DeleteLFTagCommandInput, options?: __HttpHandlerOptions): Promise; public deleteLFTag(args: DeleteLFTagCommandInput, cb: (err: any, data?: DeleteLFTagCommandOutput) => void): void; @@ -250,6 +473,45 @@ export class LakeFormation extends LakeFormationClient { } } + /** + *

+ * For a specific governed table, provides a list of Amazon S3 objects that will be written
+ *   during the current transaction and that can be automatically deleted
+ *   if the transaction is canceled. Without this call, no Amazon S3 objects are
+ *   automatically deleted when a transaction cancels.

                                  + *

                                  + * The Glue ETL library function write_dynamic_frame.from_catalog() includes an option to automatically + * call DeleteObjectsOnCancel before writes. For more information, see + * Rolling Back Amazon S3 Writes. + *

                                  + */ + public deleteObjectsOnCancel( + args: DeleteObjectsOnCancelCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteObjectsOnCancel( + args: DeleteObjectsOnCancelCommandInput, + cb: (err: any, data?: DeleteObjectsOnCancelCommandOutput) => void + ): void; + public deleteObjectsOnCancel( + args: DeleteObjectsOnCancelCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteObjectsOnCancelCommandOutput) => void + ): void; + public deleteObjectsOnCancel( + args: DeleteObjectsOnCancelCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteObjectsOnCancelCommandOutput) => void), + cb?: (err: any, data?: DeleteObjectsOnCancelCommandOutput) => void + ): Promise | void { + const command = new DeleteObjectsOnCancelCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Deregisters the resource as managed by the Data Catalog.

                                  * @@ -285,7 +547,7 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  Retrieves the current data access role for the given resource registered in AWS Lake Formation.

                                  + *

                                  Retrieves the current data access role for the given resource registered in Lake Formation.

                                  */ public describeResource( args: DescribeResourceCommandInput, @@ -316,6 +578,72 @@ export class LakeFormation extends LakeFormationClient { } } + /** + *

                                  Returns the details of a single transaction.

                                  + */ + public describeTransaction( + args: DescribeTransactionCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public describeTransaction( + args: DescribeTransactionCommandInput, + cb: (err: any, data?: DescribeTransactionCommandOutput) => void + ): void; + public describeTransaction( + args: DescribeTransactionCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DescribeTransactionCommandOutput) => void + ): void; + public describeTransaction( + args: DescribeTransactionCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DescribeTransactionCommandOutput) => void), + cb?: (err: any, data?: DescribeTransactionCommandOutput) => void + ): Promise | void { + const command = new DescribeTransactionCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Indicates to the service that the specified transaction is still active and should not be treated as idle and aborted.

                                  + * + *

                                  Write transactions that remain idle for a long period are automatically aborted unless explicitly extended.

                                  + */ + public extendTransaction( + args: ExtendTransactionCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public extendTransaction( + args: ExtendTransactionCommandInput, + cb: (err: any, data?: ExtendTransactionCommandOutput) => void + ): void; + public extendTransaction( + args: ExtendTransactionCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ExtendTransactionCommandOutput) => void + ): void; + public extendTransaction( + args: ExtendTransactionCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ExtendTransactionCommandOutput) => void), + cb?: (err: any, data?: ExtendTransactionCommandOutput) => void + ): Promise | void { + const command = new ExtendTransactionCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Retrieves the list of the data lake administrators of a Lake Formation-managed data lake.

                                  */ @@ -382,7 +710,7 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  Returns a tag definition.

                                  + *

                                  Returns an LF-tag definition.

                                  */ public getLFTag(args: GetLFTagCommandInput, options?: __HttpHandlerOptions): Promise; public getLFTag(args: GetLFTagCommandInput, cb: (err: any, data?: GetLFTagCommandOutput) => void): void; @@ -408,7 +736,71 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  Returns the tags applied to a resource.

                                  + *

                                  Returns the state of a query previously submitted. Clients are expected to poll GetQueryState to monitor the current state of the planning before retrieving the work units. A query state is only visible to the principal that made the initial call to StartQueryPlanning.

                                  + */ + public getQueryState( + args: GetQueryStateCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getQueryState( + args: GetQueryStateCommandInput, + cb: (err: any, data?: GetQueryStateCommandOutput) => void + ): void; + public getQueryState( + args: GetQueryStateCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetQueryStateCommandOutput) => void + ): void; + public getQueryState( + args: GetQueryStateCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetQueryStateCommandOutput) => void), + cb?: (err: any, data?: GetQueryStateCommandOutput) => void + ): Promise | void { + const command = new GetQueryStateCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Retrieves statistics on the planning and execution of a query.

                                  + */ + public getQueryStatistics( + args: GetQueryStatisticsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getQueryStatistics( + args: GetQueryStatisticsCommandInput, + cb: (err: any, data?: GetQueryStatisticsCommandOutput) => void + ): void; + public getQueryStatistics( + args: GetQueryStatisticsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetQueryStatisticsCommandOutput) => void + ): void; + public getQueryStatistics( + args: GetQueryStatisticsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetQueryStatisticsCommandOutput) => void), + cb?: (err: any, data?: GetQueryStatisticsCommandOutput) => void + ): Promise | void { + const command = new GetQueryStatisticsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Returns the LF-tags applied to a resource.

                                  */ public getResourceLFTags( args: GetResourceLFTagsCommandInput, @@ -439,6 +831,99 @@ export class LakeFormation extends LakeFormationClient { } } + /** + *

                                  Returns the set of Amazon S3 objects that make up the specified governed table. A transaction ID or timestamp can be specified for time-travel queries.
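A hedged sketch of the time-travel read described above, listing a governed table's manifest as of a point in time. The database and table names are placeholders, and the QueryAsOfTime/TransactionId parameters and the nested Objects response shape are assumptions based on the Lake Formation governed-table API.

```ts
import { LakeFormationClient, GetTableObjectsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// Time-travel read: the table's manifest as it looked 24 hours ago.
client
  .send(
    new GetTableObjectsCommand({
      DatabaseName: "sales_db", // placeholder
      TableName: "orders", // placeholder governed table
      QueryAsOfTime: new Date(Date.now() - 24 * 60 * 60 * 1000),
      // Alternatively, pass TransactionId instead of QueryAsOfTime to read within a transaction.
    })
  )
  .then((output) => {
    for (const partition of output.Objects ?? []) {
      for (const object of partition.Objects ?? []) {
        console.log(object.Uri, object.Size);
      }
    }
  })
  .catch(console.error);
```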

                                  + */ + public getTableObjects( + args: GetTableObjectsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getTableObjects( + args: GetTableObjectsCommandInput, + cb: (err: any, data?: GetTableObjectsCommandOutput) => void + ): void; + public getTableObjects( + args: GetTableObjectsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetTableObjectsCommandOutput) => void + ): void; + public getTableObjects( + args: GetTableObjectsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetTableObjectsCommandOutput) => void), + cb?: (err: any, data?: GetTableObjectsCommandOutput) => void + ): Promise | void { + const command = new GetTableObjectsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Returns the work units resulting from the query. Work units can be executed in any order and in parallel.

                                  + */ + public getWorkUnitResults( + args: GetWorkUnitResultsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getWorkUnitResults( + args: GetWorkUnitResultsCommandInput, + cb: (err: any, data?: GetWorkUnitResultsCommandOutput) => void + ): void; + public getWorkUnitResults( + args: GetWorkUnitResultsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetWorkUnitResultsCommandOutput) => void + ): void; + public getWorkUnitResults( + args: GetWorkUnitResultsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetWorkUnitResultsCommandOutput) => void), + cb?: (err: any, data?: GetWorkUnitResultsCommandOutput) => void + ): Promise | void { + const command = new GetWorkUnitResultsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Retrieves the work units generated by the StartQueryPlanning operation.

                                  + */ + public getWorkUnits( + args: GetWorkUnitsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getWorkUnits(args: GetWorkUnitsCommandInput, cb: (err: any, data?: GetWorkUnitsCommandOutput) => void): void; + public getWorkUnits( + args: GetWorkUnitsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetWorkUnitsCommandOutput) => void + ): void; + public getWorkUnits( + args: GetWorkUnitsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetWorkUnitsCommandOutput) => void), + cb?: (err: any, data?: GetWorkUnitsCommandOutput) => void + ): Promise | void { + const command = new GetWorkUnitsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.

                                  *

                                  For information about permissions, see Security and Access Control to Metadata and Data.

                                  @@ -473,7 +958,39 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  Lists tags that the requester has permission to view.

                                  + *

                                  Lists all the data cell filters on a table.

                                  + */ + public listDataCellsFilter( + args: ListDataCellsFilterCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listDataCellsFilter( + args: ListDataCellsFilterCommandInput, + cb: (err: any, data?: ListDataCellsFilterCommandOutput) => void + ): void; + public listDataCellsFilter( + args: ListDataCellsFilterCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListDataCellsFilterCommandOutput) => void + ): void; + public listDataCellsFilter( + args: ListDataCellsFilterCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListDataCellsFilterCommandOutput) => void), + cb?: (err: any, data?: ListDataCellsFilterCommandOutput) => void + ): Promise | void { + const command = new ListDataCellsFilterCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Lists LF-tags that the requester has permission to view.

                                  */ public listLFTags(args: ListLFTagsCommandInput, options?: __HttpHandlerOptions): Promise; public listLFTags(args: ListLFTagsCommandInput, cb: (err: any, data?: ListLFTagsCommandOutput) => void): void; @@ -564,6 +1081,71 @@ export class LakeFormation extends LakeFormationClient { } } + /** + *

                                  Returns the configuration of all storage optimizers associated with a specified table.

                                  + */ + public listTableStorageOptimizers( + args: ListTableStorageOptimizersCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTableStorageOptimizers( + args: ListTableStorageOptimizersCommandInput, + cb: (err: any, data?: ListTableStorageOptimizersCommandOutput) => void + ): void; + public listTableStorageOptimizers( + args: ListTableStorageOptimizersCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTableStorageOptimizersCommandOutput) => void + ): void; + public listTableStorageOptimizers( + args: ListTableStorageOptimizersCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTableStorageOptimizersCommandOutput) => void), + cb?: (err: any, data?: ListTableStorageOptimizersCommandOutput) => void + ): Promise | void { + const command = new ListTableStorageOptimizersCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Returns metadata about transactions and their status. To prevent the response from growing indefinitely, only uncommitted transactions and those available for time-travel queries are returned.

                                  + *

                                  This operation can help you identify uncommitted transactions or to get information about transactions.

                                  + */ + public listTransactions( + args: ListTransactionsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTransactions( + args: ListTransactionsCommandInput, + cb: (err: any, data?: ListTransactionsCommandOutput) => void + ): void; + public listTransactions( + args: ListTransactionsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTransactionsCommandOutput) => void + ): void; + public listTransactions( + args: ListTransactionsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTransactionsCommandOutput) => void), + cb?: (err: any, data?: ListTransactionsCommandOutput) => void + ): Promise | void { + const command = new ListTransactionsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                  Sets the list of data lake administrators who have admin privileges on all resources managed by Lake Formation. For more information on admin privileges, see Granting Lake Formation Permissions.

                                  * @@ -601,9 +1183,9 @@ export class LakeFormation extends LakeFormationClient { /** *

                                  Registers the resource as managed by the Data Catalog.

                                  * - *

                                  To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.

                                  + *

                                  To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.

                                  * - *

                                  The following request registers a new location and gives AWS Lake Formation permission to use the service-linked role to access that location.

                                  + *

                                  The following request registers a new location and gives Lake Formation permission to use the service-linked role to access that location.

                                  * *

                                  * ResourceArn = arn:aws:s3:::my-bucket @@ -646,7 +1228,7 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  Removes a tag from the resource. Only database, table, or tableWithColumns resource are allowed. To tag columns, use the column inclusion list in tableWithColumns to specify column input.

                                  + *

Removes an LF-tag from the resource. Only database, table, or tableWithColumns resources are allowed. To tag columns, use the column inclusion list in tableWithColumns to specify column input.

                                  */ public removeLFTagsFromResource( args: RemoveLFTagsFromResourceCommandInput, @@ -742,7 +1324,7 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  This operation allows a search on TABLE resources by LFTags. This will be used by admins who want to grant user permissions on certain LFTags. Before making a grant, the admin can use SearchTablesByLFTags to find all resources where the given LFTags are valid to verify whether the returned resources can be shared.

                                  + *

                                  This operation allows a search on TABLE resources by LFTags. This will be used by admins who want to grant user permissions on certain LF-tags. Before making a grant, the admin can use SearchTablesByLFTags to find all resources where the given LFTags are valid to verify whether the returned resources can be shared.

                                  */ public searchTablesByLFTags( args: SearchTablesByLFTagsCommandInput, @@ -774,7 +1356,73 @@ export class LakeFormation extends LakeFormationClient { } /** - *

                                  Updates the list of possible values for the specified tag key. If the tag does not exist, the operation throws an EntityNotFoundException. The values in the delete key values will be deleted from list of possible values. If any value in the delete key values is attached to a resource, then API errors out with a 400 Exception - "Update not allowed". Untag the attribute before deleting the tag key's value.

                                  + *

                                  Submits a request to process a query statement.

                                  + * + *

                                  This operation generates work units that can be retrieved with the GetWorkUnits operation as soon as the query state is WORKUNITS_AVAILABLE or FINISHED.
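A hedged sketch of the planning workflow described above: submit a query, poll GetQueryState until work units are available, then retrieve the work unit ranges. The database name and query are placeholders, and the QueryPlanningContext, State, and WorkUnitRanges field names are assumptions based on the Lake Formation query API.

```ts
import {
  LakeFormationClient,
  StartQueryPlanningCommand,
  GetQueryStateCommand,
  GetWorkUnitsCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function planQuery(): Promise<void> {
  const { QueryId } = await client.send(
    new StartQueryPlanningCommand({
      QueryPlanningContext: { DatabaseName: "sales_db" }, // placeholder database
      QueryString: "SELECT order_id, amount FROM orders WHERE country = 'DE'", // placeholder query
    })
  );

  // Poll until planning finishes, per the documentation above.
  let state = "PENDING";
  while (state !== "WORKUNITS_AVAILABLE" && state !== "FINISHED") {
    await new Promise((resolve) => setTimeout(resolve, 1000));
    const response = await client.send(new GetQueryStateCommand({ QueryId }));
    state = response.State ?? "ERROR";
    if (state === "ERROR") throw new Error("query planning failed");
  }

  const { WorkUnitRanges } = await client.send(new GetWorkUnitsCommand({ QueryId }));
  console.log(`query ${QueryId} produced ${WorkUnitRanges?.length ?? 0} work unit ranges`);
}

planQuery().catch(console.error);
```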

                                  + */ + public startQueryPlanning( + args: StartQueryPlanningCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public startQueryPlanning( + args: StartQueryPlanningCommandInput, + cb: (err: any, data?: StartQueryPlanningCommandOutput) => void + ): void; + public startQueryPlanning( + args: StartQueryPlanningCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StartQueryPlanningCommandOutput) => void + ): void; + public startQueryPlanning( + args: StartQueryPlanningCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StartQueryPlanningCommandOutput) => void), + cb?: (err: any, data?: StartQueryPlanningCommandOutput) => void + ): Promise | void { + const command = new StartQueryPlanningCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Starts a new transaction and returns its transaction ID. Transaction IDs are opaque objects that you can use to identify a transaction.
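A minimal sketch of the transaction lifecycle these new methods expose: start a transaction, run work against it, then commit, cancelling on failure. The TransactionType member and its READ_AND_WRITE value are assumptions to check against the generated models:

```ts
import {
  LakeFormationClient,
  StartTransactionCommand,
  CommitTransactionCommand,
  CancelTransactionCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function withTransaction(work: (transactionId: string) => Promise<void>) {
  // Begin a write transaction; the returned ID is the opaque token used by the other calls.
  const { TransactionId } = await client.send(
    new StartTransactionCommand({ TransactionType: "READ_AND_WRITE" })
  );
  if (!TransactionId) throw new Error("no transaction ID returned");

  try {
    await work(TransactionId);
    // CommitTransaction is documented as idempotent, so retrying a commit is safe.
    await client.send(new CommitTransactionCommand({ TransactionId }));
  } catch (err) {
    // Roll back: cancel the transaction if the work or the commit failed.
    await client.send(new CancelTransactionCommand({ TransactionId }));
    throw err;
  }
}
```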

                                  + */ + public startTransaction( + args: StartTransactionCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public startTransaction( + args: StartTransactionCommandInput, + cb: (err: any, data?: StartTransactionCommandOutput) => void + ): void; + public startTransaction( + args: StartTransactionCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: StartTransactionCommandOutput) => void + ): void; + public startTransaction( + args: StartTransactionCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: StartTransactionCommandOutput) => void), + cb?: (err: any, data?: StartTransactionCommandOutput) => void + ): Promise | void { + const command = new StartTransactionCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Updates the list of possible values for the specified LF-tag key. If the LF-tag does not exist, the operation throws an EntityNotFoundException. The values in the delete key values will be deleted from the list of possible values. If any value in the delete key values is attached to a resource, then the API errors out with a 400 Exception - "Update not allowed". Untag the attribute before deleting the LF-tag key's value.
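Every operation on this aggregated client, including the new ones in this diff, keeps the same overload pattern: return a promise when no callback is given, or invoke a callback when one is passed last. A small sketch using updateLFTag, with placeholder tag keys and values, and member names (TagKey, TagValuesToAdd, TagValuesToDelete) assumed from the request shape:

```ts
import { LakeFormation } from "@aws-sdk/client-lakeformation";

const lakeformation = new LakeFormation({ region: "us-east-1" }); // placeholder region

async function main() {
  // Promise style: omit the callback and await the returned promise.
  await lakeformation.updateLFTag({
    TagKey: "environment",       // placeholder LF-tag key
    TagValuesToAdd: ["staging"], // placeholder value
  });

  // Callback style: pass a callback as the last argument instead of awaiting.
  lakeformation.updateLFTag(
    { TagKey: "environment", TagValuesToDelete: ["deprecated"] },
    (err, data) => {
      if (err) console.error(err);
      else console.log(data);
    }
  );
}

main().catch(console.error);
```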

   */
  public updateLFTag(args: UpdateLFTagCommandInput, options?: __HttpHandlerOptions): Promise<UpdateLFTagCommandOutput>;
  public updateLFTag(args: UpdateLFTagCommandInput, cb: (err: any, data?: UpdateLFTagCommandOutput) => void): void;
@@ -800,7 +1448,7 @@ export class LakeFormation extends LakeFormationClient {
  }

  /**
- * Updates the data access role used for vending access to the given (registered) resource in AWS Lake Formation.
+ * Updates the data access role used for vending access to the given (registered) resource in Lake Formation.

   */
  public updateResource(
    args: UpdateResourceCommandInput,
@@ -830,4 +1478,68 @@ export class LakeFormation extends LakeFormationClient {
      return this.send(command, optionsOrCb);
    }
  }
+
+  /**
+ * Updates the manifest of Amazon S3 objects that make up the specified governed table.
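A sketch of how a writer might use this inside an open transaction, adding one newly written object and removing a stale one from the manifest; the WriteOperations/AddObject/DeleteObject member names are assumptions to verify against models_0.ts:

```ts
import { LakeFormationClient, UpdateTableObjectsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// Register one newly written S3 object and drop a stale one from the governed
// table's manifest, inside an open transaction (transactionId comes from StartTransaction).
async function updateManifest(transactionId: string) {
  await client.send(
    new UpdateTableObjectsCommand({
      DatabaseName: "sales",        // placeholder database
      TableName: "orders_governed", // placeholder governed table
      TransactionId: transactionId,
      WriteOperations: [
        {
          AddObject: {
            Uri: "s3://example-bucket/orders/part-0001.parquet",
            ETag: "etag-value", // placeholder ETag
            Size: 1048576,
          },
        },
        { DeleteObject: { Uri: "s3://example-bucket/orders/part-0000.parquet" } },
      ],
    })
  );
}
```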

                                  + */ + public updateTableObjects( + args: UpdateTableObjectsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateTableObjects( + args: UpdateTableObjectsCommandInput, + cb: (err: any, data?: UpdateTableObjectsCommandOutput) => void + ): void; + public updateTableObjects( + args: UpdateTableObjectsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateTableObjectsCommandOutput) => void + ): void; + public updateTableObjects( + args: UpdateTableObjectsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateTableObjectsCommandOutput) => void), + cb?: (err: any, data?: UpdateTableObjectsCommandOutput) => void + ): Promise | void { + const command = new UpdateTableObjectsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Updates the configuration of the storage optimizers for a table.

                                  + */ + public updateTableStorageOptimizer( + args: UpdateTableStorageOptimizerCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateTableStorageOptimizer( + args: UpdateTableStorageOptimizerCommandInput, + cb: (err: any, data?: UpdateTableStorageOptimizerCommandOutput) => void + ): void; + public updateTableStorageOptimizer( + args: UpdateTableStorageOptimizerCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateTableStorageOptimizerCommandOutput) => void + ): void; + public updateTableStorageOptimizer( + args: UpdateTableStorageOptimizerCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateTableStorageOptimizerCommandOutput) => void), + cb?: (err: any, data?: UpdateTableStorageOptimizerCommandOutput) => void + ): Promise | void { + const command = new UpdateTableStorageOptimizerCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } } diff --git a/clients/client-lakeformation/src/LakeFormationClient.ts b/clients/client-lakeformation/src/LakeFormationClient.ts index fba9973cd3b5..e88729e6004a 100644 --- a/clients/client-lakeformation/src/LakeFormationClient.ts +++ b/clients/client-lakeformation/src/LakeFormationClient.ts @@ -61,10 +61,29 @@ import { BatchRevokePermissionsCommandInput, BatchRevokePermissionsCommandOutput, } from "./commands/BatchRevokePermissionsCommand"; +import { CancelTransactionCommandInput, CancelTransactionCommandOutput } from "./commands/CancelTransactionCommand"; +import { CommitTransactionCommandInput, CommitTransactionCommandOutput } from "./commands/CommitTransactionCommand"; +import { + CreateDataCellsFilterCommandInput, + CreateDataCellsFilterCommandOutput, +} from "./commands/CreateDataCellsFilterCommand"; import { CreateLFTagCommandInput, CreateLFTagCommandOutput } from "./commands/CreateLFTagCommand"; +import { + DeleteDataCellsFilterCommandInput, + DeleteDataCellsFilterCommandOutput, +} from "./commands/DeleteDataCellsFilterCommand"; import { DeleteLFTagCommandInput, DeleteLFTagCommandOutput } from "./commands/DeleteLFTagCommand"; +import { + DeleteObjectsOnCancelCommandInput, + DeleteObjectsOnCancelCommandOutput, +} from "./commands/DeleteObjectsOnCancelCommand"; import { DeregisterResourceCommandInput, DeregisterResourceCommandOutput } from "./commands/DeregisterResourceCommand"; import { DescribeResourceCommandInput, DescribeResourceCommandOutput } from "./commands/DescribeResourceCommand"; +import { + DescribeTransactionCommandInput, + DescribeTransactionCommandOutput, +} from "./commands/DescribeTransactionCommand"; +import { ExtendTransactionCommandInput, ExtendTransactionCommandOutput } from "./commands/ExtendTransactionCommand"; import { GetDataLakeSettingsCommandInput, GetDataLakeSettingsCommandOutput, @@ -74,11 +93,25 @@ import { GetEffectivePermissionsForPathCommandOutput, } from "./commands/GetEffectivePermissionsForPathCommand"; import { GetLFTagCommandInput, GetLFTagCommandOutput } from "./commands/GetLFTagCommand"; +import { GetQueryStateCommandInput, GetQueryStateCommandOutput } from "./commands/GetQueryStateCommand"; +import { GetQueryStatisticsCommandInput, GetQueryStatisticsCommandOutput } from "./commands/GetQueryStatisticsCommand"; import { 
GetResourceLFTagsCommandInput, GetResourceLFTagsCommandOutput } from "./commands/GetResourceLFTagsCommand"; +import { GetTableObjectsCommandInput, GetTableObjectsCommandOutput } from "./commands/GetTableObjectsCommand"; +import { GetWorkUnitResultsCommandInput, GetWorkUnitResultsCommandOutput } from "./commands/GetWorkUnitResultsCommand"; +import { GetWorkUnitsCommandInput, GetWorkUnitsCommandOutput } from "./commands/GetWorkUnitsCommand"; import { GrantPermissionsCommandInput, GrantPermissionsCommandOutput } from "./commands/GrantPermissionsCommand"; +import { + ListDataCellsFilterCommandInput, + ListDataCellsFilterCommandOutput, +} from "./commands/ListDataCellsFilterCommand"; import { ListLFTagsCommandInput, ListLFTagsCommandOutput } from "./commands/ListLFTagsCommand"; import { ListPermissionsCommandInput, ListPermissionsCommandOutput } from "./commands/ListPermissionsCommand"; import { ListResourcesCommandInput, ListResourcesCommandOutput } from "./commands/ListResourcesCommand"; +import { + ListTableStorageOptimizersCommandInput, + ListTableStorageOptimizersCommandOutput, +} from "./commands/ListTableStorageOptimizersCommand"; +import { ListTransactionsCommandInput, ListTransactionsCommandOutput } from "./commands/ListTransactionsCommand"; import { PutDataLakeSettingsCommandInput, PutDataLakeSettingsCommandOutput, @@ -97,59 +130,104 @@ import { SearchTablesByLFTagsCommandInput, SearchTablesByLFTagsCommandOutput, } from "./commands/SearchTablesByLFTagsCommand"; +import { StartQueryPlanningCommandInput, StartQueryPlanningCommandOutput } from "./commands/StartQueryPlanningCommand"; +import { StartTransactionCommandInput, StartTransactionCommandOutput } from "./commands/StartTransactionCommand"; import { UpdateLFTagCommandInput, UpdateLFTagCommandOutput } from "./commands/UpdateLFTagCommand"; import { UpdateResourceCommandInput, UpdateResourceCommandOutput } from "./commands/UpdateResourceCommand"; +import { UpdateTableObjectsCommandInput, UpdateTableObjectsCommandOutput } from "./commands/UpdateTableObjectsCommand"; +import { + UpdateTableStorageOptimizerCommandInput, + UpdateTableStorageOptimizerCommandOutput, +} from "./commands/UpdateTableStorageOptimizerCommand"; import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = | AddLFTagsToResourceCommandInput | BatchGrantPermissionsCommandInput | BatchRevokePermissionsCommandInput + | CancelTransactionCommandInput + | CommitTransactionCommandInput + | CreateDataCellsFilterCommandInput | CreateLFTagCommandInput + | DeleteDataCellsFilterCommandInput | DeleteLFTagCommandInput + | DeleteObjectsOnCancelCommandInput | DeregisterResourceCommandInput | DescribeResourceCommandInput + | DescribeTransactionCommandInput + | ExtendTransactionCommandInput | GetDataLakeSettingsCommandInput | GetEffectivePermissionsForPathCommandInput | GetLFTagCommandInput + | GetQueryStateCommandInput + | GetQueryStatisticsCommandInput | GetResourceLFTagsCommandInput + | GetTableObjectsCommandInput + | GetWorkUnitResultsCommandInput + | GetWorkUnitsCommandInput | GrantPermissionsCommandInput + | ListDataCellsFilterCommandInput | ListLFTagsCommandInput | ListPermissionsCommandInput | ListResourcesCommandInput + | ListTableStorageOptimizersCommandInput + | ListTransactionsCommandInput | PutDataLakeSettingsCommandInput | RegisterResourceCommandInput | RemoveLFTagsFromResourceCommandInput | RevokePermissionsCommandInput | SearchDatabasesByLFTagsCommandInput | SearchTablesByLFTagsCommandInput + | StartQueryPlanningCommandInput 
+ | StartTransactionCommandInput | UpdateLFTagCommandInput - | UpdateResourceCommandInput; + | UpdateResourceCommandInput + | UpdateTableObjectsCommandInput + | UpdateTableStorageOptimizerCommandInput; export type ServiceOutputTypes = | AddLFTagsToResourceCommandOutput | BatchGrantPermissionsCommandOutput | BatchRevokePermissionsCommandOutput + | CancelTransactionCommandOutput + | CommitTransactionCommandOutput + | CreateDataCellsFilterCommandOutput | CreateLFTagCommandOutput + | DeleteDataCellsFilterCommandOutput | DeleteLFTagCommandOutput + | DeleteObjectsOnCancelCommandOutput | DeregisterResourceCommandOutput | DescribeResourceCommandOutput + | DescribeTransactionCommandOutput + | ExtendTransactionCommandOutput | GetDataLakeSettingsCommandOutput | GetEffectivePermissionsForPathCommandOutput | GetLFTagCommandOutput + | GetQueryStateCommandOutput + | GetQueryStatisticsCommandOutput | GetResourceLFTagsCommandOutput + | GetTableObjectsCommandOutput + | GetWorkUnitResultsCommandOutput + | GetWorkUnitsCommandOutput | GrantPermissionsCommandOutput + | ListDataCellsFilterCommandOutput | ListLFTagsCommandOutput | ListPermissionsCommandOutput | ListResourcesCommandOutput + | ListTableStorageOptimizersCommandOutput + | ListTransactionsCommandOutput | PutDataLakeSettingsCommandOutput | RegisterResourceCommandOutput | RemoveLFTagsFromResourceCommandOutput | RevokePermissionsCommandOutput | SearchDatabasesByLFTagsCommandOutput | SearchTablesByLFTagsCommandOutput + | StartQueryPlanningCommandOutput + | StartTransactionCommandOutput | UpdateLFTagCommandOutput - | UpdateResourceCommandOutput; + | UpdateResourceCommandOutput + | UpdateTableObjectsCommandOutput + | UpdateTableStorageOptimizerCommandOutput; export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { /** @@ -300,8 +378,8 @@ type LakeFormationClientResolvedConfigType = __SmithyResolvedConfiguration<__Htt export interface LakeFormationClientResolvedConfig extends LakeFormationClientResolvedConfigType {} /** - * AWS Lake Formation - *

Defines the public endpoint for the AWS Lake Formation service.
+ * Lake Formation
+ * Defines the public endpoint for the Lake Formation service.
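A minimal construction sketch for the client (the region value is a placeholder):

```ts
import { LakeFormationClient, DescribeTransactionCommand } from "@aws-sdk/client-lakeformation";

// Credentials, retries, and a custom endpoint are all optional; unspecified
// settings fall back to the resolved runtime configuration.
const client = new LakeFormationClient({ region: "us-west-2" }); // placeholder region

// Every operation in this client, new or old, is sent the same way.
async function describe(transactionId: string) {
  return client.send(new DescribeTransactionCommand({ TransactionId: transactionId }));
}
```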

                                  */ export class LakeFormationClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-lakeformation/src/commands/AddLFTagsToResourceCommand.ts b/clients/client-lakeformation/src/commands/AddLFTagsToResourceCommand.ts index 3139b6106a11..126bb1639b4b 100644 --- a/clients/client-lakeformation/src/commands/AddLFTagsToResourceCommand.ts +++ b/clients/client-lakeformation/src/commands/AddLFTagsToResourceCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { AddLFTagsToResourceRequest, AddLFTagsToResourceResponse } from "../models/models_0"; import { - deserializeAws_json1_1AddLFTagsToResourceCommand, - serializeAws_json1_1AddLFTagsToResourceCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1AddLFTagsToResourceCommand, + serializeAws_restJson1AddLFTagsToResourceCommand, +} from "../protocols/Aws_restJson1"; export interface AddLFTagsToResourceCommandInput extends AddLFTagsToResourceRequest {} export interface AddLFTagsToResourceCommandOutput extends AddLFTagsToResourceResponse, __MetadataBearer {} /** - *

Attaches one or more tags to an existing resource.
+ * Attaches one or more LF-tags to an existing resource.

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class AddLFTagsToResourceCommand extends $Command< } private serialize(input: AddLFTagsToResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1AddLFTagsToResourceCommand(input, context); + return serializeAws_restJson1AddLFTagsToResourceCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1AddLFTagsToResourceCommand(output, context); + return deserializeAws_restJson1AddLFTagsToResourceCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/BatchGrantPermissionsCommand.ts b/clients/client-lakeformation/src/commands/BatchGrantPermissionsCommand.ts index 953171f38c4e..60d80b2b6da5 100644 --- a/clients/client-lakeformation/src/commands/BatchGrantPermissionsCommand.ts +++ b/clients/client-lakeformation/src/commands/BatchGrantPermissionsCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { BatchGrantPermissionsRequest, BatchGrantPermissionsResponse } from "../models/models_0"; import { - deserializeAws_json1_1BatchGrantPermissionsCommand, - serializeAws_json1_1BatchGrantPermissionsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1BatchGrantPermissionsCommand, + serializeAws_restJson1BatchGrantPermissionsCommand, +} from "../protocols/Aws_restJson1"; export interface BatchGrantPermissionsCommandInput extends BatchGrantPermissionsRequest {} export interface BatchGrantPermissionsCommandOutput extends BatchGrantPermissionsResponse, __MetadataBearer {} @@ -83,11 +83,11 @@ export class BatchGrantPermissionsCommand extends $Command< } private serialize(input: BatchGrantPermissionsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1BatchGrantPermissionsCommand(input, context); + return serializeAws_restJson1BatchGrantPermissionsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1BatchGrantPermissionsCommand(output, context); + return deserializeAws_restJson1BatchGrantPermissionsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/BatchRevokePermissionsCommand.ts b/clients/client-lakeformation/src/commands/BatchRevokePermissionsCommand.ts index c22353835274..ed97da6d90c9 100644 --- a/clients/client-lakeformation/src/commands/BatchRevokePermissionsCommand.ts +++ b/clients/client-lakeformation/src/commands/BatchRevokePermissionsCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { BatchRevokePermissionsRequest, BatchRevokePermissionsResponse } from "../models/models_0"; import { - deserializeAws_json1_1BatchRevokePermissionsCommand, - serializeAws_json1_1BatchRevokePermissionsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1BatchRevokePermissionsCommand, + serializeAws_restJson1BatchRevokePermissionsCommand, +} from "../protocols/Aws_restJson1"; export interface BatchRevokePermissionsCommandInput extends BatchRevokePermissionsRequest {} export interface BatchRevokePermissionsCommandOutput extends 
BatchRevokePermissionsResponse, __MetadataBearer {} @@ -83,11 +83,11 @@ export class BatchRevokePermissionsCommand extends $Command< } private serialize(input: BatchRevokePermissionsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1BatchRevokePermissionsCommand(input, context); + return serializeAws_restJson1BatchRevokePermissionsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1BatchRevokePermissionsCommand(output, context); + return deserializeAws_restJson1BatchRevokePermissionsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/CancelTransactionCommand.ts b/clients/client-lakeformation/src/commands/CancelTransactionCommand.ts new file mode 100644 index 000000000000..b2569e2a4c7a --- /dev/null +++ b/clients/client-lakeformation/src/commands/CancelTransactionCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { CancelTransactionRequest, CancelTransactionResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CancelTransactionCommand, + serializeAws_restJson1CancelTransactionCommand, +} from "../protocols/Aws_restJson1"; + +export interface CancelTransactionCommandInput extends CancelTransactionRequest {} +export interface CancelTransactionCommandOutput extends CancelTransactionResponse, __MetadataBearer {} + +/** + *

Attempts to cancel the specified transaction. Returns an exception if the transaction was previously committed.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, CancelTransactionCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, CancelTransactionCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new CancelTransactionCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CancelTransactionCommandInput} for command's `input` shape. + * @see {@link CancelTransactionCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class CancelTransactionCommand extends $Command< + CancelTransactionCommandInput, + CancelTransactionCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CancelTransactionCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "CancelTransactionCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CancelTransactionRequest.filterSensitiveLog, + outputFilterSensitiveLog: CancelTransactionResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CancelTransactionCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CancelTransactionCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CancelTransactionCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/CommitTransactionCommand.ts b/clients/client-lakeformation/src/commands/CommitTransactionCommand.ts new file mode 100644 index 000000000000..31cd1eb215e5 --- /dev/null +++ b/clients/client-lakeformation/src/commands/CommitTransactionCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { CommitTransactionRequest, CommitTransactionResponse } 
from "../models/models_0"; +import { + deserializeAws_restJson1CommitTransactionCommand, + serializeAws_restJson1CommitTransactionCommand, +} from "../protocols/Aws_restJson1"; + +export interface CommitTransactionCommandInput extends CommitTransactionRequest {} +export interface CommitTransactionCommandOutput extends CommitTransactionResponse, __MetadataBearer {} + +/** + *

Attempts to commit the specified transaction. Returns an exception if the transaction was previously aborted. This API action is idempotent if called multiple times for the same transaction.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, CommitTransactionCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, CommitTransactionCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new CommitTransactionCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CommitTransactionCommandInput} for command's `input` shape. + * @see {@link CommitTransactionCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class CommitTransactionCommand extends $Command< + CommitTransactionCommandInput, + CommitTransactionCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CommitTransactionCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "CommitTransactionCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CommitTransactionRequest.filterSensitiveLog, + outputFilterSensitiveLog: CommitTransactionResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CommitTransactionCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CommitTransactionCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CommitTransactionCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/CreateDataCellsFilterCommand.ts b/clients/client-lakeformation/src/commands/CreateDataCellsFilterCommand.ts new file mode 100644 index 000000000000..293846cb95a9 --- /dev/null +++ b/clients/client-lakeformation/src/commands/CreateDataCellsFilterCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { CreateDataCellsFilterRequest, 
CreateDataCellsFilterResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateDataCellsFilterCommand, + serializeAws_restJson1CreateDataCellsFilterCommand, +} from "../protocols/Aws_restJson1"; + +export interface CreateDataCellsFilterCommandInput extends CreateDataCellsFilterRequest {} +export interface CreateDataCellsFilterCommandOutput extends CreateDataCellsFilterResponse, __MetadataBearer {} + +/** + *

Creates a data cell filter so that you can grant access to certain columns on certain rows.
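A sketch of creating such a filter that exposes only selected columns and rows; the nested TableData shape (TableCatalogId, RowFilter, ColumnNames) is an assumption to verify against the generated models:

```ts
import { LakeFormationClient, CreateDataCellsFilterCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// A cell-level filter that exposes only two columns and only EU rows of a table.
async function createEuFilter(accountId: string) {
  await client.send(
    new CreateDataCellsFilterCommand({
      TableData: {
        TableCatalogId: accountId,      // the catalog (account) that owns the table
        DatabaseName: "sales",          // placeholder database
        TableName: "orders",            // placeholder table
        Name: "eu-orders-only",         // placeholder filter name
        RowFilter: { FilterExpression: "region = 'EU'" },
        ColumnNames: ["order_id", "total"],
      },
    })
  );
}
```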

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, CreateDataCellsFilterCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, CreateDataCellsFilterCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new CreateDataCellsFilterCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateDataCellsFilterCommandInput} for command's `input` shape. + * @see {@link CreateDataCellsFilterCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class CreateDataCellsFilterCommand extends $Command< + CreateDataCellsFilterCommandInput, + CreateDataCellsFilterCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateDataCellsFilterCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "CreateDataCellsFilterCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateDataCellsFilterRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateDataCellsFilterResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateDataCellsFilterCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateDataCellsFilterCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateDataCellsFilterCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/CreateLFTagCommand.ts b/clients/client-lakeformation/src/commands/CreateLFTagCommand.ts index 2438cef9de82..78836990444e 100644 --- a/clients/client-lakeformation/src/commands/CreateLFTagCommand.ts +++ b/clients/client-lakeformation/src/commands/CreateLFTagCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { CreateLFTagRequest, CreateLFTagResponse } from "../models/models_0"; import { - deserializeAws_json1_1CreateLFTagCommand, - serializeAws_json1_1CreateLFTagCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1CreateLFTagCommand, + serializeAws_restJson1CreateLFTagCommand, +} from "../protocols/Aws_restJson1"; export interface CreateLFTagCommandInput extends CreateLFTagRequest {} export interface 
CreateLFTagCommandOutput extends CreateLFTagResponse, __MetadataBearer {} /** - *

Creates a tag with the specified name and values.
+ * Creates an LF-tag with the specified name and values.

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class CreateLFTagCommand extends $Command< } private serialize(input: CreateLFTagCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1CreateLFTagCommand(input, context); + return serializeAws_restJson1CreateLFTagCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1CreateLFTagCommand(output, context); + return deserializeAws_restJson1CreateLFTagCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/DeleteDataCellsFilterCommand.ts b/clients/client-lakeformation/src/commands/DeleteDataCellsFilterCommand.ts new file mode 100644 index 000000000000..a43f21d38881 --- /dev/null +++ b/clients/client-lakeformation/src/commands/DeleteDataCellsFilterCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { DeleteDataCellsFilterRequest, DeleteDataCellsFilterResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteDataCellsFilterCommand, + serializeAws_restJson1DeleteDataCellsFilterCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteDataCellsFilterCommandInput extends DeleteDataCellsFilterRequest {} +export interface DeleteDataCellsFilterCommandOutput extends DeleteDataCellsFilterResponse, __MetadataBearer {} + +/** + *

Deletes a data cell filter.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, DeleteDataCellsFilterCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, DeleteDataCellsFilterCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new DeleteDataCellsFilterCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteDataCellsFilterCommandInput} for command's `input` shape. + * @see {@link DeleteDataCellsFilterCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class DeleteDataCellsFilterCommand extends $Command< + DeleteDataCellsFilterCommandInput, + DeleteDataCellsFilterCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteDataCellsFilterCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "DeleteDataCellsFilterCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteDataCellsFilterRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteDataCellsFilterResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteDataCellsFilterCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteDataCellsFilterCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteDataCellsFilterCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/DeleteLFTagCommand.ts b/clients/client-lakeformation/src/commands/DeleteLFTagCommand.ts index 914cc0125dff..e6efbb0c9950 100644 --- a/clients/client-lakeformation/src/commands/DeleteLFTagCommand.ts +++ b/clients/client-lakeformation/src/commands/DeleteLFTagCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { DeleteLFTagRequest, DeleteLFTagResponse } from "../models/models_0"; import { - deserializeAws_json1_1DeleteLFTagCommand, - serializeAws_json1_1DeleteLFTagCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1DeleteLFTagCommand, + serializeAws_restJson1DeleteLFTagCommand, +} from "../protocols/Aws_restJson1"; export interface DeleteLFTagCommandInput extends DeleteLFTagRequest {} export interface 
DeleteLFTagCommandOutput extends DeleteLFTagResponse, __MetadataBearer {} /** - *

Deletes the specified tag key name. If the attribute key does not exist or the tag does not exist, then the operation will not do anything. If the attribute key exists, then the operation checks if any resources are tagged with this attribute key, if yes, the API throws a 400 Exception with the message "Delete not allowed" as the tag key is still attached with resources. You can consider untagging resources with this tag key.
+ * Deletes the specified LF-tag key name. If the attribute key does not exist or the LF-tag does not exist, the operation does nothing. If the attribute key exists, the operation checks whether any resources are tagged with this attribute key; if so, the API throws a 400 Exception with the message "Delete not allowed" because the LF-tag key is still attached to resources. Untag resources with this LF-tag key before deleting it.

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class DeleteLFTagCommand extends $Command< } private serialize(input: DeleteLFTagCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1DeleteLFTagCommand(input, context); + return serializeAws_restJson1DeleteLFTagCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1DeleteLFTagCommand(output, context); + return deserializeAws_restJson1DeleteLFTagCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/DeleteObjectsOnCancelCommand.ts b/clients/client-lakeformation/src/commands/DeleteObjectsOnCancelCommand.ts new file mode 100644 index 000000000000..25f71f4a7d8f --- /dev/null +++ b/clients/client-lakeformation/src/commands/DeleteObjectsOnCancelCommand.ts @@ -0,0 +1,102 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { DeleteObjectsOnCancelRequest, DeleteObjectsOnCancelResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteObjectsOnCancelCommand, + serializeAws_restJson1DeleteObjectsOnCancelCommand, +} from "../protocols/Aws_restJson1"; + +export interface DeleteObjectsOnCancelCommandInput extends DeleteObjectsOnCancelRequest {} +export interface DeleteObjectsOnCancelCommandOutput extends DeleteObjectsOnCancelResponse, __MetadataBearer {} + +/** + *

For a specific governed table, provides a list of Amazon S3 objects that will be written during the current transaction and that can be automatically deleted
+ * if the transaction is canceled. Without this call, no Amazon S3 objects are automatically deleted when a transaction cancels.
+ *
+ * The Glue ETL library function write_dynamic_frame.from_catalog() includes an option to automatically
+ * call DeleteObjectsOnCancel before writes. For more information, see
+ * Rolling Back Amazon S3 Writes.
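A sketch of registering freshly written objects for automatic cleanup on cancellation, assuming an Objects list of { Uri } entries as the request shape:

```ts
import { LakeFormationClient, DeleteObjectsOnCancelCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// Tell Lake Formation which freshly written objects to clean up automatically
// if this transaction is later cancelled.
async function registerForCleanup(transactionId: string, uris: string[]) {
  await client.send(
    new DeleteObjectsOnCancelCommand({
      DatabaseName: "sales",        // placeholder database
      TableName: "orders_governed", // placeholder governed table
      TransactionId: transactionId,
      Objects: uris.map((uri) => ({ Uri: uri })),
    })
  );
}
```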

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, DeleteObjectsOnCancelCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, DeleteObjectsOnCancelCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new DeleteObjectsOnCancelCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteObjectsOnCancelCommandInput} for command's `input` shape. + * @see {@link DeleteObjectsOnCancelCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class DeleteObjectsOnCancelCommand extends $Command< + DeleteObjectsOnCancelCommandInput, + DeleteObjectsOnCancelCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteObjectsOnCancelCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "DeleteObjectsOnCancelCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteObjectsOnCancelRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteObjectsOnCancelResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteObjectsOnCancelCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteObjectsOnCancelCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteObjectsOnCancelCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/DeregisterResourceCommand.ts b/clients/client-lakeformation/src/commands/DeregisterResourceCommand.ts index f085b88ee779..ce50d9dc3e09 100644 --- a/clients/client-lakeformation/src/commands/DeregisterResourceCommand.ts +++ b/clients/client-lakeformation/src/commands/DeregisterResourceCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { DeregisterResourceRequest, DeregisterResourceResponse } from "../models/models_0"; import { - deserializeAws_json1_1DeregisterResourceCommand, - serializeAws_json1_1DeregisterResourceCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1DeregisterResourceCommand, + serializeAws_restJson1DeregisterResourceCommand, +} from "../protocols/Aws_restJson1"; export interface 
DeregisterResourceCommandInput extends DeregisterResourceRequest {} export interface DeregisterResourceCommandOutput extends DeregisterResourceResponse, __MetadataBearer {} @@ -85,11 +85,11 @@ export class DeregisterResourceCommand extends $Command< } private serialize(input: DeregisterResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1DeregisterResourceCommand(input, context); + return serializeAws_restJson1DeregisterResourceCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1DeregisterResourceCommand(output, context); + return deserializeAws_restJson1DeregisterResourceCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/DescribeResourceCommand.ts b/clients/client-lakeformation/src/commands/DescribeResourceCommand.ts index cf56f8109ff8..be0cdd234a99 100644 --- a/clients/client-lakeformation/src/commands/DescribeResourceCommand.ts +++ b/clients/client-lakeformation/src/commands/DescribeResourceCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { DescribeResourceRequest, DescribeResourceResponse } from "../models/models_0"; import { - deserializeAws_json1_1DescribeResourceCommand, - serializeAws_json1_1DescribeResourceCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1DescribeResourceCommand, + serializeAws_restJson1DescribeResourceCommand, +} from "../protocols/Aws_restJson1"; export interface DescribeResourceCommandInput extends DescribeResourceRequest {} export interface DescribeResourceCommandOutput extends DescribeResourceResponse, __MetadataBearer {} /** - *

Retrieves the current data access role for the given resource registered in AWS Lake Formation.
+ * Retrieves the current data access role for the given resource registered in Lake Formation.

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class DescribeResourceCommand extends $Command< } private serialize(input: DescribeResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1DescribeResourceCommand(input, context); + return serializeAws_restJson1DescribeResourceCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1DescribeResourceCommand(output, context); + return deserializeAws_restJson1DescribeResourceCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/DescribeTransactionCommand.ts b/clients/client-lakeformation/src/commands/DescribeTransactionCommand.ts new file mode 100644 index 000000000000..b4ad83d30a91 --- /dev/null +++ b/clients/client-lakeformation/src/commands/DescribeTransactionCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { DescribeTransactionRequest, DescribeTransactionResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DescribeTransactionCommand, + serializeAws_restJson1DescribeTransactionCommand, +} from "../protocols/Aws_restJson1"; + +export interface DescribeTransactionCommandInput extends DescribeTransactionRequest {} +export interface DescribeTransactionCommandOutput extends DescribeTransactionResponse, __MetadataBearer {} + +/** + *

Returns the details of a single transaction.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, DescribeTransactionCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, DescribeTransactionCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new DescribeTransactionCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DescribeTransactionCommandInput} for command's `input` shape. + * @see {@link DescribeTransactionCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class DescribeTransactionCommand extends $Command< + DescribeTransactionCommandInput, + DescribeTransactionCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DescribeTransactionCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "DescribeTransactionCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DescribeTransactionRequest.filterSensitiveLog, + outputFilterSensitiveLog: DescribeTransactionResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DescribeTransactionCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DescribeTransactionCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DescribeTransactionCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/ExtendTransactionCommand.ts b/clients/client-lakeformation/src/commands/ExtendTransactionCommand.ts new file mode 100644 index 000000000000..48cb37e3af17 --- /dev/null +++ b/clients/client-lakeformation/src/commands/ExtendTransactionCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { ExtendTransactionRequest, 
ExtendTransactionResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ExtendTransactionCommand, + serializeAws_restJson1ExtendTransactionCommand, +} from "../protocols/Aws_restJson1"; + +export interface ExtendTransactionCommandInput extends ExtendTransactionRequest {} +export interface ExtendTransactionCommandOutput extends ExtendTransactionResponse, __MetadataBearer {} + +/** + *

Indicates to the service that the specified transaction is still active and should not be treated as idle and aborted.
+ *
+ * Write transactions that remain idle for a long period are automatically aborted unless explicitly extended.
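A small keep-alive sketch built on this behavior, assuming ExtendTransaction only needs the TransactionId:

```ts
import { LakeFormationClient, ExtendTransactionCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// Periodically extend a long-running write transaction so the service does not
// treat it as idle; clear the returned timer once the work is committed or cancelled.
function keepTransactionAlive(
  transactionId: string,
  intervalMs = 60_000
): ReturnType<typeof setInterval> {
  return setInterval(() => {
    client.send(new ExtendTransactionCommand({ TransactionId: transactionId })).catch((err) => {
      console.error("ExtendTransaction failed", err);
    });
  }, intervalMs);
}
```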

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, ExtendTransactionCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, ExtendTransactionCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new ExtendTransactionCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ExtendTransactionCommandInput} for command's `input` shape. + * @see {@link ExtendTransactionCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class ExtendTransactionCommand extends $Command< + ExtendTransactionCommandInput, + ExtendTransactionCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ExtendTransactionCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "ExtendTransactionCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ExtendTransactionRequest.filterSensitiveLog, + outputFilterSensitiveLog: ExtendTransactionResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ExtendTransactionCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ExtendTransactionCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ExtendTransactionCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/GetDataLakeSettingsCommand.ts b/clients/client-lakeformation/src/commands/GetDataLakeSettingsCommand.ts index ae5a8e04d2de..8faf3a17e74e 100644 --- a/clients/client-lakeformation/src/commands/GetDataLakeSettingsCommand.ts +++ b/clients/client-lakeformation/src/commands/GetDataLakeSettingsCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { GetDataLakeSettingsRequest, GetDataLakeSettingsResponse } from "../models/models_0"; import { - deserializeAws_json1_1GetDataLakeSettingsCommand, - serializeAws_json1_1GetDataLakeSettingsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1GetDataLakeSettingsCommand, + serializeAws_restJson1GetDataLakeSettingsCommand, +} from "../protocols/Aws_restJson1"; export interface GetDataLakeSettingsCommandInput extends GetDataLakeSettingsRequest 
{} export interface GetDataLakeSettingsCommandOutput extends GetDataLakeSettingsResponse, __MetadataBearer {} @@ -83,11 +83,11 @@ export class GetDataLakeSettingsCommand extends $Command< } private serialize(input: GetDataLakeSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1GetDataLakeSettingsCommand(input, context); + return serializeAws_restJson1GetDataLakeSettingsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1GetDataLakeSettingsCommand(output, context); + return deserializeAws_restJson1GetDataLakeSettingsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/GetEffectivePermissionsForPathCommand.ts b/clients/client-lakeformation/src/commands/GetEffectivePermissionsForPathCommand.ts index 60cc6963e7e2..c16db6361af6 100644 --- a/clients/client-lakeformation/src/commands/GetEffectivePermissionsForPathCommand.ts +++ b/clients/client-lakeformation/src/commands/GetEffectivePermissionsForPathCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { GetEffectivePermissionsForPathRequest, GetEffectivePermissionsForPathResponse } from "../models/models_0"; import { - deserializeAws_json1_1GetEffectivePermissionsForPathCommand, - serializeAws_json1_1GetEffectivePermissionsForPathCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1GetEffectivePermissionsForPathCommand, + serializeAws_restJson1GetEffectivePermissionsForPathCommand, +} from "../protocols/Aws_restJson1"; export interface GetEffectivePermissionsForPathCommandInput extends GetEffectivePermissionsForPathRequest {} export interface GetEffectivePermissionsForPathCommandOutput @@ -89,14 +89,14 @@ export class GetEffectivePermissionsForPathCommand extends $Command< input: GetEffectivePermissionsForPathCommandInput, context: __SerdeContext ): Promise<__HttpRequest> { - return serializeAws_json1_1GetEffectivePermissionsForPathCommand(input, context); + return serializeAws_restJson1GetEffectivePermissionsForPathCommand(input, context); } private deserialize( output: __HttpResponse, context: __SerdeContext ): Promise { - return deserializeAws_json1_1GetEffectivePermissionsForPathCommand(output, context); + return deserializeAws_restJson1GetEffectivePermissionsForPathCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/GetLFTagCommand.ts b/clients/client-lakeformation/src/commands/GetLFTagCommand.ts index 98aa40a2e7f2..f3df6fc28500 100644 --- a/clients/client-lakeformation/src/commands/GetLFTagCommand.ts +++ b/clients/client-lakeformation/src/commands/GetLFTagCommand.ts @@ -13,13 +13,16 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { GetLFTagRequest, GetLFTagResponse } from "../models/models_0"; -import { deserializeAws_json1_1GetLFTagCommand, serializeAws_json1_1GetLFTagCommand } from "../protocols/Aws_json1_1"; +import { + deserializeAws_restJson1GetLFTagCommand, + serializeAws_restJson1GetLFTagCommand, +} from "../protocols/Aws_restJson1"; export interface GetLFTagCommandInput extends GetLFTagRequest {} export interface GetLFTagCommandOutput extends GetLFTagResponse, __MetadataBearer {} /** - *

<p>Returns a tag definition.</p> + * <p>Returns an LF-tag definition.</p>

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -80,11 +83,11 @@ export class GetLFTagCommand extends $Command< } private serialize(input: GetLFTagCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1GetLFTagCommand(input, context); + return serializeAws_restJson1GetLFTagCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1GetLFTagCommand(output, context); + return deserializeAws_restJson1GetLFTagCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/GetQueryStateCommand.ts b/clients/client-lakeformation/src/commands/GetQueryStateCommand.ts new file mode 100644 index 000000000000..78494b739beb --- /dev/null +++ b/clients/client-lakeformation/src/commands/GetQueryStateCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { GetQueryStateRequest, GetQueryStateResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetQueryStateCommand, + serializeAws_restJson1GetQueryStateCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetQueryStateCommandInput extends GetQueryStateRequest {} +export interface GetQueryStateCommandOutput extends GetQueryStateResponse, __MetadataBearer {} + +/** + *

<p>Returns the state of a query previously submitted. Clients are expected to poll GetQueryState to monitor the current state of the planning before retrieving the work units. A query state is only visible to the principal that made the initial call to StartQueryPlanning.</p>
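The polling expectation described here can be sketched roughly as follows. This is an illustrative sketch, not part of the generated client: the QueryId and State field names and the ERROR state are assumptions, while WORKUNITS_AVAILABLE and FINISHED are the state names this patch documents for StartQueryPlanning.

```typescript
import {
  GetQueryStateCommand,
  GetWorkUnitsCommand,
  LakeFormationClient,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" });

// Poll GetQueryState until the planner reports that work units can be retrieved.
// QueryId/State and the ERROR state are assumptions for illustration; the
// WORKUNITS_AVAILABLE and FINISHED names appear in this patch.
async function waitForWorkUnits(queryId: string) {
  for (;;) {
    const { State } = await client.send(new GetQueryStateCommand({ QueryId: queryId }));
    if (State === "WORKUNITS_AVAILABLE" || State === "FINISHED") break;
    if (State === "ERROR") throw new Error(`query ${queryId} failed during planning`);
    await new Promise((resolve) => setTimeout(resolve, 1000)); // fixed 1s delay between polls
  }
  // Once work units are available they can be listed with GetWorkUnitsCommand.
  return client.send(new GetWorkUnitsCommand({ QueryId: queryId }));
}
```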

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, GetQueryStateCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, GetQueryStateCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new GetQueryStateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetQueryStateCommandInput} for command's `input` shape. + * @see {@link GetQueryStateCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class GetQueryStateCommand extends $Command< + GetQueryStateCommandInput, + GetQueryStateCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetQueryStateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "GetQueryStateCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetQueryStateRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetQueryStateResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetQueryStateCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetQueryStateCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetQueryStateCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/GetQueryStatisticsCommand.ts b/clients/client-lakeformation/src/commands/GetQueryStatisticsCommand.ts new file mode 100644 index 000000000000..8953e5ea3c69 --- /dev/null +++ b/clients/client-lakeformation/src/commands/GetQueryStatisticsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { GetQueryStatisticsRequest, GetQueryStatisticsResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1GetQueryStatisticsCommand, + serializeAws_restJson1GetQueryStatisticsCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetQueryStatisticsCommandInput extends GetQueryStatisticsRequest {} +export interface GetQueryStatisticsCommandOutput extends GetQueryStatisticsResponse, __MetadataBearer {} + +/** + *

<p>Retrieves statistics on the planning and execution of a query.</p>

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, GetQueryStatisticsCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, GetQueryStatisticsCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new GetQueryStatisticsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetQueryStatisticsCommandInput} for command's `input` shape. + * @see {@link GetQueryStatisticsCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class GetQueryStatisticsCommand extends $Command< + GetQueryStatisticsCommandInput, + GetQueryStatisticsCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetQueryStatisticsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "GetQueryStatisticsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetQueryStatisticsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetQueryStatisticsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetQueryStatisticsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetQueryStatisticsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetQueryStatisticsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/GetResourceLFTagsCommand.ts b/clients/client-lakeformation/src/commands/GetResourceLFTagsCommand.ts index ce234934b96a..ce42dac3a6c3 100644 --- a/clients/client-lakeformation/src/commands/GetResourceLFTagsCommand.ts +++ b/clients/client-lakeformation/src/commands/GetResourceLFTagsCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { GetResourceLFTagsRequest, GetResourceLFTagsResponse } from "../models/models_0"; import { - deserializeAws_json1_1GetResourceLFTagsCommand, - serializeAws_json1_1GetResourceLFTagsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1GetResourceLFTagsCommand, + serializeAws_restJson1GetResourceLFTagsCommand, +} from "../protocols/Aws_restJson1"; export interface GetResourceLFTagsCommandInput extends GetResourceLFTagsRequest {} 
export interface GetResourceLFTagsCommandOutput extends GetResourceLFTagsResponse, __MetadataBearer {} /** - *

<p>Returns the tags applied to a resource.</p> + * <p>Returns the LF-tags applied to a resource.</p>

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class GetResourceLFTagsCommand extends $Command< } private serialize(input: GetResourceLFTagsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1GetResourceLFTagsCommand(input, context); + return serializeAws_restJson1GetResourceLFTagsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1GetResourceLFTagsCommand(output, context); + return deserializeAws_restJson1GetResourceLFTagsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/GetTableObjectsCommand.ts b/clients/client-lakeformation/src/commands/GetTableObjectsCommand.ts new file mode 100644 index 000000000000..2fc48648630a --- /dev/null +++ b/clients/client-lakeformation/src/commands/GetTableObjectsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { GetTableObjectsRequest, GetTableObjectsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetTableObjectsCommand, + serializeAws_restJson1GetTableObjectsCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetTableObjectsCommandInput extends GetTableObjectsRequest {} +export interface GetTableObjectsCommandOutput extends GetTableObjectsResponse, __MetadataBearer {} + +/** + *

<p>Returns the set of Amazon S3 objects that make up the specified governed table. A transaction ID or timestamp can be specified for time-travel queries.</p>
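A rough sketch of the time-travel behaviour mentioned here, pinning the read either to a point in time or to an open transaction. The DatabaseName, TableName, QueryAsOfTime, TransactionId, and Objects names are assumptions for illustration; only the command and client come from this patch.

```typescript
import { GetTableObjectsCommand, LakeFormationClient } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" });

// Read a governed table's underlying S3 objects, pinned either to a point in
// time or to an open transaction. All field names here are assumptions used
// for illustration only.
async function listGovernedTableObjects() {
  const asOfYesterday = await client.send(
    new GetTableObjectsCommand({
      DatabaseName: "sales",
      TableName: "orders_governed",
      QueryAsOfTime: new Date(Date.now() - 24 * 60 * 60 * 1000), // time-travel read
    })
  );
  console.log(asOfYesterday.Objects);

  const inTransaction = await client.send(
    new GetTableObjectsCommand({
      DatabaseName: "sales",
      TableName: "orders_governed",
      TransactionId: "example-transaction-id", // hypothetical open transaction
    })
  );
  console.log(inTransaction.Objects);
}
```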

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, GetTableObjectsCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, GetTableObjectsCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new GetTableObjectsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetTableObjectsCommandInput} for command's `input` shape. + * @see {@link GetTableObjectsCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class GetTableObjectsCommand extends $Command< + GetTableObjectsCommandInput, + GetTableObjectsCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetTableObjectsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "GetTableObjectsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetTableObjectsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetTableObjectsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetTableObjectsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetTableObjectsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetTableObjectsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/GetWorkUnitResultsCommand.ts b/clients/client-lakeformation/src/commands/GetWorkUnitResultsCommand.ts new file mode 100644 index 000000000000..5ba8b27f3ce5 --- /dev/null +++ b/clients/client-lakeformation/src/commands/GetWorkUnitResultsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { GetWorkUnitResultsRequest, GetWorkUnitResultsResponse } from "../models/models_0"; 
+import { + deserializeAws_restJson1GetWorkUnitResultsCommand, + serializeAws_restJson1GetWorkUnitResultsCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetWorkUnitResultsCommandInput extends GetWorkUnitResultsRequest {} +export interface GetWorkUnitResultsCommandOutput extends GetWorkUnitResultsResponse, __MetadataBearer {} + +/** + *

<p>Returns the work units resulting from the query. Work units can be executed in any order and in parallel.</p>
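Because the work units are described as independent, a caller can fetch their results in parallel. The sketch below assumes, since this diff does not spell it out, that GetWorkUnits returns WorkUnitRanges with WorkUnitIdMin, WorkUnitIdMax, and WorkUnitToken, and that GetWorkUnitResults takes QueryId, WorkUnitId, and WorkUnitToken.

```typescript
import {
  GetWorkUnitResultsCommand,
  GetWorkUnitResultsCommandOutput,
  GetWorkUnitsCommand,
  LakeFormationClient,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" });

// Fan out over the work units of a planned query and fetch their results in
// parallel. WorkUnitRanges, WorkUnitIdMin/Max, WorkUnitToken, and WorkUnitId
// are assumed field names; only the commands themselves come from this patch.
async function fetchAllWorkUnitResults(queryId: string) {
  const { WorkUnitRanges = [] } = await client.send(new GetWorkUnitsCommand({ QueryId: queryId }));

  const pending: Promise<GetWorkUnitResultsCommandOutput>[] = [];
  for (const range of WorkUnitRanges) {
    for (let id = range.WorkUnitIdMin ?? 0; id <= (range.WorkUnitIdMax ?? -1); id++) {
      pending.push(
        client.send(
          new GetWorkUnitResultsCommand({
            QueryId: queryId,
            WorkUnitId: id,
            WorkUnitToken: range.WorkUnitToken,
          })
        )
      );
    }
  }

  // The description says units may run in any order, so Promise.all is safe here.
  return Promise.all(pending);
}
```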

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, GetWorkUnitResultsCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, GetWorkUnitResultsCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new GetWorkUnitResultsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetWorkUnitResultsCommandInput} for command's `input` shape. + * @see {@link GetWorkUnitResultsCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class GetWorkUnitResultsCommand extends $Command< + GetWorkUnitResultsCommandInput, + GetWorkUnitResultsCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetWorkUnitResultsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "GetWorkUnitResultsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetWorkUnitResultsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetWorkUnitResultsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetWorkUnitResultsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetWorkUnitResultsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetWorkUnitResultsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/GetWorkUnitsCommand.ts b/clients/client-lakeformation/src/commands/GetWorkUnitsCommand.ts new file mode 100644 index 000000000000..2491935c89d1 --- /dev/null +++ b/clients/client-lakeformation/src/commands/GetWorkUnitsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { GetWorkUnitsRequest, GetWorkUnitsResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1GetWorkUnitsCommand, + serializeAws_restJson1GetWorkUnitsCommand, +} from "../protocols/Aws_restJson1"; + +export interface GetWorkUnitsCommandInput extends GetWorkUnitsRequest {} +export interface GetWorkUnitsCommandOutput extends GetWorkUnitsResponse, __MetadataBearer {} + +/** + *

<p>Retrieves the work units generated by the StartQueryPlanning operation.</p>

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, GetWorkUnitsCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, GetWorkUnitsCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new GetWorkUnitsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetWorkUnitsCommandInput} for command's `input` shape. + * @see {@link GetWorkUnitsCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class GetWorkUnitsCommand extends $Command< + GetWorkUnitsCommandInput, + GetWorkUnitsCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetWorkUnitsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "GetWorkUnitsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetWorkUnitsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetWorkUnitsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetWorkUnitsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetWorkUnitsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetWorkUnitsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/GrantPermissionsCommand.ts b/clients/client-lakeformation/src/commands/GrantPermissionsCommand.ts index 48cd4126b282..a2752e0122da 100644 --- a/clients/client-lakeformation/src/commands/GrantPermissionsCommand.ts +++ b/clients/client-lakeformation/src/commands/GrantPermissionsCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { GrantPermissionsRequest, GrantPermissionsResponse } from "../models/models_0"; import { - deserializeAws_json1_1GrantPermissionsCommand, - serializeAws_json1_1GrantPermissionsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1GrantPermissionsCommand, + serializeAws_restJson1GrantPermissionsCommand, +} from "../protocols/Aws_restJson1"; export interface GrantPermissionsCommandInput extends GrantPermissionsRequest {} export interface GrantPermissionsCommandOutput extends GrantPermissionsResponse, __MetadataBearer {} @@ 
-84,11 +84,11 @@ export class GrantPermissionsCommand extends $Command< } private serialize(input: GrantPermissionsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1GrantPermissionsCommand(input, context); + return serializeAws_restJson1GrantPermissionsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1GrantPermissionsCommand(output, context); + return deserializeAws_restJson1GrantPermissionsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/ListDataCellsFilterCommand.ts b/clients/client-lakeformation/src/commands/ListDataCellsFilterCommand.ts new file mode 100644 index 000000000000..5618f991ac24 --- /dev/null +++ b/clients/client-lakeformation/src/commands/ListDataCellsFilterCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { ListDataCellsFilterRequest, ListDataCellsFilterResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListDataCellsFilterCommand, + serializeAws_restJson1ListDataCellsFilterCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListDataCellsFilterCommandInput extends ListDataCellsFilterRequest {} +export interface ListDataCellsFilterCommandOutput extends ListDataCellsFilterResponse, __MetadataBearer {} + +/** + *

<p>Lists all the data cell filters on a table.</p>

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, ListDataCellsFilterCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, ListDataCellsFilterCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new ListDataCellsFilterCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListDataCellsFilterCommandInput} for command's `input` shape. + * @see {@link ListDataCellsFilterCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class ListDataCellsFilterCommand extends $Command< + ListDataCellsFilterCommandInput, + ListDataCellsFilterCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListDataCellsFilterCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "ListDataCellsFilterCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListDataCellsFilterRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListDataCellsFilterResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListDataCellsFilterCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListDataCellsFilterCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListDataCellsFilterCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/ListLFTagsCommand.ts b/clients/client-lakeformation/src/commands/ListLFTagsCommand.ts index 5bd67da30b3d..d9034bc1d2ac 100644 --- a/clients/client-lakeformation/src/commands/ListLFTagsCommand.ts +++ b/clients/client-lakeformation/src/commands/ListLFTagsCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { ListLFTagsRequest, ListLFTagsResponse } from "../models/models_0"; import { - deserializeAws_json1_1ListLFTagsCommand, - serializeAws_json1_1ListLFTagsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1ListLFTagsCommand, + serializeAws_restJson1ListLFTagsCommand, +} from "../protocols/Aws_restJson1"; export interface ListLFTagsCommandInput extends ListLFTagsRequest {} export interface ListLFTagsCommandOutput extends ListLFTagsResponse, 
__MetadataBearer {} /** - *

<p>Lists tags that the requester has permission to view.</p> + * <p>Lists LF-tags that the requester has permission to view.</p>

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class ListLFTagsCommand extends $Command< } private serialize(input: ListLFTagsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1ListLFTagsCommand(input, context); + return serializeAws_restJson1ListLFTagsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1ListLFTagsCommand(output, context); + return deserializeAws_restJson1ListLFTagsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/ListPermissionsCommand.ts b/clients/client-lakeformation/src/commands/ListPermissionsCommand.ts index ade924c81894..b65f5e65a09e 100644 --- a/clients/client-lakeformation/src/commands/ListPermissionsCommand.ts +++ b/clients/client-lakeformation/src/commands/ListPermissionsCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { ListPermissionsRequest, ListPermissionsResponse } from "../models/models_0"; import { - deserializeAws_json1_1ListPermissionsCommand, - serializeAws_json1_1ListPermissionsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1ListPermissionsCommand, + serializeAws_restJson1ListPermissionsCommand, +} from "../protocols/Aws_restJson1"; export interface ListPermissionsCommandInput extends ListPermissionsRequest {} export interface ListPermissionsCommandOutput extends ListPermissionsResponse, __MetadataBearer {} @@ -85,11 +85,11 @@ export class ListPermissionsCommand extends $Command< } private serialize(input: ListPermissionsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1ListPermissionsCommand(input, context); + return serializeAws_restJson1ListPermissionsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1ListPermissionsCommand(output, context); + return deserializeAws_restJson1ListPermissionsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/ListResourcesCommand.ts b/clients/client-lakeformation/src/commands/ListResourcesCommand.ts index a45a86d92eb0..2b79bbc5c33c 100644 --- a/clients/client-lakeformation/src/commands/ListResourcesCommand.ts +++ b/clients/client-lakeformation/src/commands/ListResourcesCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { ListResourcesRequest, ListResourcesResponse } from "../models/models_0"; import { - deserializeAws_json1_1ListResourcesCommand, - serializeAws_json1_1ListResourcesCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1ListResourcesCommand, + serializeAws_restJson1ListResourcesCommand, +} from "../protocols/Aws_restJson1"; export interface ListResourcesCommandInput extends ListResourcesRequest {} export interface ListResourcesCommandOutput extends ListResourcesResponse, __MetadataBearer {} @@ -83,11 +83,11 @@ export class ListResourcesCommand extends $Command< } private serialize(input: ListResourcesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1ListResourcesCommand(input, context); + return 
serializeAws_restJson1ListResourcesCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1ListResourcesCommand(output, context); + return deserializeAws_restJson1ListResourcesCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/ListTableStorageOptimizersCommand.ts b/clients/client-lakeformation/src/commands/ListTableStorageOptimizersCommand.ts new file mode 100644 index 000000000000..b9a0709f5fda --- /dev/null +++ b/clients/client-lakeformation/src/commands/ListTableStorageOptimizersCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { ListTableStorageOptimizersRequest, ListTableStorageOptimizersResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTableStorageOptimizersCommand, + serializeAws_restJson1ListTableStorageOptimizersCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListTableStorageOptimizersCommandInput extends ListTableStorageOptimizersRequest {} +export interface ListTableStorageOptimizersCommandOutput extends ListTableStorageOptimizersResponse, __MetadataBearer {} + +/** + *

<p>Returns the configuration of all storage optimizers associated with a specified table.</p>

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, ListTableStorageOptimizersCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, ListTableStorageOptimizersCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new ListTableStorageOptimizersCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTableStorageOptimizersCommandInput} for command's `input` shape. + * @see {@link ListTableStorageOptimizersCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class ListTableStorageOptimizersCommand extends $Command< + ListTableStorageOptimizersCommandInput, + ListTableStorageOptimizersCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTableStorageOptimizersCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "ListTableStorageOptimizersCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTableStorageOptimizersRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTableStorageOptimizersResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTableStorageOptimizersCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTableStorageOptimizersCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1ListTableStorageOptimizersCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/ListTransactionsCommand.ts b/clients/client-lakeformation/src/commands/ListTransactionsCommand.ts new file mode 100644 index 000000000000..ab5c73eeb24d --- /dev/null +++ b/clients/client-lakeformation/src/commands/ListTransactionsCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { 
LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { ListTransactionsRequest, ListTransactionsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTransactionsCommand, + serializeAws_restJson1ListTransactionsCommand, +} from "../protocols/Aws_restJson1"; + +export interface ListTransactionsCommandInput extends ListTransactionsRequest {} +export interface ListTransactionsCommandOutput extends ListTransactionsResponse, __MetadataBearer {} + +/** + *

<p>Returns metadata about transactions and their status. To prevent the response from growing indefinitely, only uncommitted transactions and those available for time-travel queries are returned.</p> + * <p>This operation can help you identify uncommitted transactions or to get information about transactions.</p>
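A sketch of paging through that metadata. The Transactions and NextToken field names follow the usual AWS SDK pagination pattern but are assumptions here; only ListTransactionsCommand itself comes from this patch.

```typescript
import { LakeFormationClient, ListTransactionsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" });

// Page through transaction metadata until no NextToken is returned.
// Transactions and NextToken are assumed field names for illustration.
async function logAllTransactions() {
  let nextToken: string | undefined;
  do {
    const page = await client.send(new ListTransactionsCommand({ NextToken: nextToken }));
    for (const transaction of page.Transactions ?? []) {
      console.log(transaction); // e.g. look for uncommitted transactions here
    }
    nextToken = page.NextToken;
  } while (nextToken);
}
```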

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, ListTransactionsCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, ListTransactionsCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new ListTransactionsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTransactionsCommandInput} for command's `input` shape. + * @see {@link ListTransactionsCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class ListTransactionsCommand extends $Command< + ListTransactionsCommandInput, + ListTransactionsCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTransactionsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "ListTransactionsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTransactionsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTransactionsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTransactionsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTransactionsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListTransactionsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/PutDataLakeSettingsCommand.ts b/clients/client-lakeformation/src/commands/PutDataLakeSettingsCommand.ts index 484a83490084..06b4465a7ce6 100644 --- a/clients/client-lakeformation/src/commands/PutDataLakeSettingsCommand.ts +++ b/clients/client-lakeformation/src/commands/PutDataLakeSettingsCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { PutDataLakeSettingsRequest, PutDataLakeSettingsResponse } from "../models/models_0"; import { - deserializeAws_json1_1PutDataLakeSettingsCommand, - serializeAws_json1_1PutDataLakeSettingsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1PutDataLakeSettingsCommand, + serializeAws_restJson1PutDataLakeSettingsCommand, +} from "../protocols/Aws_restJson1"; export interface PutDataLakeSettingsCommandInput extends PutDataLakeSettingsRequest {} export 
interface PutDataLakeSettingsCommandOutput extends PutDataLakeSettingsResponse, __MetadataBearer {} @@ -85,11 +85,11 @@ export class PutDataLakeSettingsCommand extends $Command< } private serialize(input: PutDataLakeSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1PutDataLakeSettingsCommand(input, context); + return serializeAws_restJson1PutDataLakeSettingsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1PutDataLakeSettingsCommand(output, context); + return deserializeAws_restJson1PutDataLakeSettingsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/RegisterResourceCommand.ts b/clients/client-lakeformation/src/commands/RegisterResourceCommand.ts index 1f7a9c6ceb9e..52b698c0a608 100644 --- a/clients/client-lakeformation/src/commands/RegisterResourceCommand.ts +++ b/clients/client-lakeformation/src/commands/RegisterResourceCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { RegisterResourceRequest, RegisterResourceResponse } from "../models/models_0"; import { - deserializeAws_json1_1RegisterResourceCommand, - serializeAws_json1_1RegisterResourceCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1RegisterResourceCommand, + serializeAws_restJson1RegisterResourceCommand, +} from "../protocols/Aws_restJson1"; export interface RegisterResourceCommandInput extends RegisterResourceRequest {} export interface RegisterResourceCommandOutput extends RegisterResourceResponse, __MetadataBearer {} @@ -24,9 +24,9 @@ export interface RegisterResourceCommandOutput extends RegisterResourceResponse, /** *

<p>Registers the resource as managed by the Data Catalog.</p> * - * <p>To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.</p> + * <p>To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.</p> * - * <p>The following request registers a new location and gives AWS Lake Formation permission to use the service-linked role to access that location.</p> + * <p>The following request registers a new location and gives Lake Formation permission to use the service-linked role to access that location.</p> * *

                                  * ResourceArn = arn:aws:s3:::my-bucket @@ -98,11 +98,11 @@ export class RegisterResourceCommand extends $Command< } private serialize(input: RegisterResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1RegisterResourceCommand(input, context); + return serializeAws_restJson1RegisterResourceCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1RegisterResourceCommand(output, context); + return deserializeAws_restJson1RegisterResourceCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/RemoveLFTagsFromResourceCommand.ts b/clients/client-lakeformation/src/commands/RemoveLFTagsFromResourceCommand.ts index e0c7bb935137..9621b30a61bf 100644 --- a/clients/client-lakeformation/src/commands/RemoveLFTagsFromResourceCommand.ts +++ b/clients/client-lakeformation/src/commands/RemoveLFTagsFromResourceCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { RemoveLFTagsFromResourceRequest, RemoveLFTagsFromResourceResponse } from "../models/models_0"; import { - deserializeAws_json1_1RemoveLFTagsFromResourceCommand, - serializeAws_json1_1RemoveLFTagsFromResourceCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1RemoveLFTagsFromResourceCommand, + serializeAws_restJson1RemoveLFTagsFromResourceCommand, +} from "../protocols/Aws_restJson1"; export interface RemoveLFTagsFromResourceCommandInput extends RemoveLFTagsFromResourceRequest {} export interface RemoveLFTagsFromResourceCommandOutput extends RemoveLFTagsFromResourceResponse, __MetadataBearer {} /** - *

<p>Removes a tag from the resource. Only database, table, or tableWithColumns resource are allowed. To tag columns, use the column inclusion list in tableWithColumns to specify column input.</p> + * <p>Removes an LF-tag from the resource. Only database, table, or tableWithColumns resource are allowed. To tag columns, use the column inclusion list in tableWithColumns to specify column input.</p>

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class RemoveLFTagsFromResourceCommand extends $Command< } private serialize(input: RemoveLFTagsFromResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1RemoveLFTagsFromResourceCommand(input, context); + return serializeAws_restJson1RemoveLFTagsFromResourceCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1RemoveLFTagsFromResourceCommand(output, context); + return deserializeAws_restJson1RemoveLFTagsFromResourceCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/RevokePermissionsCommand.ts b/clients/client-lakeformation/src/commands/RevokePermissionsCommand.ts index f258f5e1030f..56c4a3cb4d10 100644 --- a/clients/client-lakeformation/src/commands/RevokePermissionsCommand.ts +++ b/clients/client-lakeformation/src/commands/RevokePermissionsCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { RevokePermissionsRequest, RevokePermissionsResponse } from "../models/models_0"; import { - deserializeAws_json1_1RevokePermissionsCommand, - serializeAws_json1_1RevokePermissionsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1RevokePermissionsCommand, + serializeAws_restJson1RevokePermissionsCommand, +} from "../protocols/Aws_restJson1"; export interface RevokePermissionsCommandInput extends RevokePermissionsRequest {} export interface RevokePermissionsCommandOutput extends RevokePermissionsResponse, __MetadataBearer {} @@ -83,11 +83,11 @@ export class RevokePermissionsCommand extends $Command< } private serialize(input: RevokePermissionsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1RevokePermissionsCommand(input, context); + return serializeAws_restJson1RevokePermissionsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1RevokePermissionsCommand(output, context); + return deserializeAws_restJson1RevokePermissionsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/SearchDatabasesByLFTagsCommand.ts b/clients/client-lakeformation/src/commands/SearchDatabasesByLFTagsCommand.ts index ca4cc7cc43b5..a285c227024a 100644 --- a/clients/client-lakeformation/src/commands/SearchDatabasesByLFTagsCommand.ts +++ b/clients/client-lakeformation/src/commands/SearchDatabasesByLFTagsCommand.ts @@ -14,9 +14,9 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { SearchDatabasesByLFTagsRequest, SearchDatabasesByLFTagsResponse } from "../models/models_0"; import { - deserializeAws_json1_1SearchDatabasesByLFTagsCommand, - serializeAws_json1_1SearchDatabasesByLFTagsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1SearchDatabasesByLFTagsCommand, + serializeAws_restJson1SearchDatabasesByLFTagsCommand, +} from "../protocols/Aws_restJson1"; export interface SearchDatabasesByLFTagsCommandInput extends SearchDatabasesByLFTagsRequest {} export interface SearchDatabasesByLFTagsCommandOutput extends SearchDatabasesByLFTagsResponse, __MetadataBearer {} 
@@ -83,11 +83,11 @@ export class SearchDatabasesByLFTagsCommand extends $Command< } private serialize(input: SearchDatabasesByLFTagsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1SearchDatabasesByLFTagsCommand(input, context); + return serializeAws_restJson1SearchDatabasesByLFTagsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1SearchDatabasesByLFTagsCommand(output, context); + return deserializeAws_restJson1SearchDatabasesByLFTagsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/SearchTablesByLFTagsCommand.ts b/clients/client-lakeformation/src/commands/SearchTablesByLFTagsCommand.ts index d0ae807c76d4..51e632b0f459 100644 --- a/clients/client-lakeformation/src/commands/SearchTablesByLFTagsCommand.ts +++ b/clients/client-lakeformation/src/commands/SearchTablesByLFTagsCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { SearchTablesByLFTagsRequest, SearchTablesByLFTagsResponse } from "../models/models_0"; import { - deserializeAws_json1_1SearchTablesByLFTagsCommand, - serializeAws_json1_1SearchTablesByLFTagsCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1SearchTablesByLFTagsCommand, + serializeAws_restJson1SearchTablesByLFTagsCommand, +} from "../protocols/Aws_restJson1"; export interface SearchTablesByLFTagsCommandInput extends SearchTablesByLFTagsRequest {} export interface SearchTablesByLFTagsCommandOutput extends SearchTablesByLFTagsResponse, __MetadataBearer {} /** - *

This operation allows a search on TABLE resources by LFTags. This will be used by admins who want to grant user permissions on certain LFTags. Before making a grant, the admin can use SearchTablesByLFTags to find all resources where the given LFTags are valid to verify whether the returned resources can be shared.
+ * This operation allows a search on TABLE resources by LFTags. This will be used by admins who want to grant user permissions on certain LF-tags. Before making a grant, the admin can use SearchTablesByLFTags to find all resources where the given LFTags are valid to verify whether the returned resources can be shared.
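A usage sketch for the renamed search operation above: the client/command pattern mirrors the `@example` blocks in this patch, while the `Expression` member and its `{ TagKey, TagValues }` value shape are assumptions borrowed from the LFTagPolicyResource/LFTag models later in the diff.

```ts
import { LakeFormationClient, SearchTablesByLFTagsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Expression and the { TagKey, TagValues } shape are assumptions for illustration;
// only the command and client names are confirmed by this patch.
const tables = await client.send(
  new SearchTablesByLFTagsCommand({
    Expression: [{ TagKey: "Prod", TagValues: ["true"] }],
  })
);
console.log(tables);
```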

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class SearchTablesByLFTagsCommand extends $Command< } private serialize(input: SearchTablesByLFTagsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1SearchTablesByLFTagsCommand(input, context); + return serializeAws_restJson1SearchTablesByLFTagsCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1SearchTablesByLFTagsCommand(output, context); + return deserializeAws_restJson1SearchTablesByLFTagsCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/StartQueryPlanningCommand.ts b/clients/client-lakeformation/src/commands/StartQueryPlanningCommand.ts new file mode 100644 index 000000000000..35f411dfd516 --- /dev/null +++ b/clients/client-lakeformation/src/commands/StartQueryPlanningCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { StartQueryPlanningRequest, StartQueryPlanningResponse } from "../models/models_0"; +import { + deserializeAws_restJson1StartQueryPlanningCommand, + serializeAws_restJson1StartQueryPlanningCommand, +} from "../protocols/Aws_restJson1"; + +export interface StartQueryPlanningCommandInput extends StartQueryPlanningRequest {} +export interface StartQueryPlanningCommandOutput extends StartQueryPlanningResponse, __MetadataBearer {} + +/** + *

Submits a request to process a query statement.
+ *
+ * This operation generates work units that can be retrieved with the GetWorkUnits operation as soon as the query state is WORKUNITS_AVAILABLE or FINISHED.
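A minimal sketch of the query-planning flow described above. The command names are all exported by this patch (see the commands index further down in the diff); the request/response member names (QueryPlanningContext, QueryString, QueryId, State) are assumptions for illustration only.

```ts
import {
  LakeFormationClient,
  StartQueryPlanningCommand,
  GetQueryStateCommand,
  GetWorkUnitsCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Member names below are assumptions; only the command names come from this patch.
const { QueryId } = await client.send(
  new StartQueryPlanningCommand({
    QueryPlanningContext: { DatabaseName: "analytics" },
    QueryString: "SELECT * FROM sales WHERE dt = '2021-11-30'",
  })
);

// Poll until work units can be fetched (WORKUNITS_AVAILABLE or FINISHED), then list them.
let state: string | undefined;
do {
  ({ State: state } = await client.send(new GetQueryStateCommand({ QueryId })));
} while (state !== "WORKUNITS_AVAILABLE" && state !== "FINISHED");

console.log(await client.send(new GetWorkUnitsCommand({ QueryId })));
```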

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, StartQueryPlanningCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, StartQueryPlanningCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new StartQueryPlanningCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartQueryPlanningCommandInput} for command's `input` shape. + * @see {@link StartQueryPlanningCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class StartQueryPlanningCommand extends $Command< + StartQueryPlanningCommandInput, + StartQueryPlanningCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartQueryPlanningCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "StartQueryPlanningCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StartQueryPlanningRequest.filterSensitiveLog, + outputFilterSensitiveLog: StartQueryPlanningResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StartQueryPlanningCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1StartQueryPlanningCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1StartQueryPlanningCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/StartTransactionCommand.ts b/clients/client-lakeformation/src/commands/StartTransactionCommand.ts new file mode 100644 index 000000000000..4991e63da1dd --- /dev/null +++ b/clients/client-lakeformation/src/commands/StartTransactionCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { StartTransactionRequest, 
StartTransactionResponse } from "../models/models_0"; +import { + deserializeAws_restJson1StartTransactionCommand, + serializeAws_restJson1StartTransactionCommand, +} from "../protocols/Aws_restJson1"; + +export interface StartTransactionCommandInput extends StartTransactionRequest {} +export interface StartTransactionCommandOutput extends StartTransactionResponse, __MetadataBearer {} + +/** + *

Starts a new transaction and returns its transaction ID. Transaction IDs are opaque objects that you can use to identify a transaction.
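A sketch of the transaction lifecycle this command introduces, paired with the new governed-table write path. The AddObjectInput shape (Uri, ETag, Size, PartitionValues) and the TransactionId members of CommitTransactionRequest/CancelTransactionRequest appear later in this patch; the UpdateTableObjects request members (DatabaseName, TableName, TransactionId, WriteOperations) and the TransactionId field of the StartTransaction response are assumptions.

```ts
import {
  LakeFormationClient,
  StartTransactionCommand,
  UpdateTableObjectsCommand,
  CommitTransactionCommand,
  CancelTransactionCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// TransactionId on the response is assumed; the patch only says the operation
// "returns its transaction ID".
const { TransactionId } = await client.send(new StartTransactionCommand({}));

try {
  // UpdateTableObjectsRequest member names are assumptions; the AddObject shape
  // matches the AddObjectInput model added in this patch.
  await client.send(
    new UpdateTableObjectsCommand({
      DatabaseName: "analytics",
      TableName: "sales_governed",
      TransactionId,
      WriteOperations: [
        {
          AddObject: {
            Uri: "s3://my-bucket/sales/dt=2021-11-30/part-0001.parquet",
            ETag: "5d41402abc4b2a76b9719d911017c592",
            Size: 1048576,
            PartitionValues: ["2021-11-30"],
          },
        },
      ],
    })
  );
  await client.send(new CommitTransactionCommand({ TransactionId }));
} catch (err) {
  // CancelTransactionRequest.TransactionId comes from the model changes in this patch.
  await client.send(new CancelTransactionCommand({ TransactionId }));
  throw err;
}
```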

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, StartTransactionCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, StartTransactionCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new StartTransactionCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link StartTransactionCommandInput} for command's `input` shape. + * @see {@link StartTransactionCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class StartTransactionCommand extends $Command< + StartTransactionCommandInput, + StartTransactionCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: StartTransactionCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "StartTransactionCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: StartTransactionRequest.filterSensitiveLog, + outputFilterSensitiveLog: StartTransactionResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: StartTransactionCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1StartTransactionCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1StartTransactionCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/UpdateLFTagCommand.ts b/clients/client-lakeformation/src/commands/UpdateLFTagCommand.ts index d31ea0531fa9..a71c26db87af 100644 --- a/clients/client-lakeformation/src/commands/UpdateLFTagCommand.ts +++ b/clients/client-lakeformation/src/commands/UpdateLFTagCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { UpdateLFTagRequest, UpdateLFTagResponse } from "../models/models_0"; import { - deserializeAws_json1_1UpdateLFTagCommand, - serializeAws_json1_1UpdateLFTagCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1UpdateLFTagCommand, + serializeAws_restJson1UpdateLFTagCommand, +} from "../protocols/Aws_restJson1"; export interface UpdateLFTagCommandInput extends UpdateLFTagRequest {} export interface UpdateLFTagCommandOutput extends UpdateLFTagResponse, __MetadataBearer {} /** - *

Updates the list of possible values for the specified tag key. If the tag does not exist, the operation throws an EntityNotFoundException. The values in the delete key values will be deleted from list of possible values. If any value in the delete key values is attached to a resource, then API errors out with a 400 Exception - "Update not allowed". Untag the attribute before deleting the tag key's value.
+ * Updates the list of possible values for the specified LF-tag key. If the LF-tag does not exist, the operation throws an EntityNotFoundException. The values in the delete key values will be deleted from list of possible values. If any value in the delete key values is attached to a resource, then API errors out with a 400 Exception - "Update not allowed". Untag the attribute before deleting the LF-tag key's value.

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class UpdateLFTagCommand extends $Command< } private serialize(input: UpdateLFTagCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1UpdateLFTagCommand(input, context); + return serializeAws_restJson1UpdateLFTagCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1UpdateLFTagCommand(output, context); + return deserializeAws_restJson1UpdateLFTagCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/UpdateResourceCommand.ts b/clients/client-lakeformation/src/commands/UpdateResourceCommand.ts index 163edeb6aae8..a28f46773f91 100644 --- a/clients/client-lakeformation/src/commands/UpdateResourceCommand.ts +++ b/clients/client-lakeformation/src/commands/UpdateResourceCommand.ts @@ -14,15 +14,15 @@ import { import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; import { UpdateResourceRequest, UpdateResourceResponse } from "../models/models_0"; import { - deserializeAws_json1_1UpdateResourceCommand, - serializeAws_json1_1UpdateResourceCommand, -} from "../protocols/Aws_json1_1"; + deserializeAws_restJson1UpdateResourceCommand, + serializeAws_restJson1UpdateResourceCommand, +} from "../protocols/Aws_restJson1"; export interface UpdateResourceCommandInput extends UpdateResourceRequest {} export interface UpdateResourceCommandOutput extends UpdateResourceResponse, __MetadataBearer {} /** - *

Updates the data access role used for vending access to the given (registered) resource in AWS Lake Formation.
+ * Updates the data access role used for vending access to the given (registered) resource in Lake Formation.

                                  * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript @@ -83,11 +83,11 @@ export class UpdateResourceCommand extends $Command< } private serialize(input: UpdateResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { - return serializeAws_json1_1UpdateResourceCommand(input, context); + return serializeAws_restJson1UpdateResourceCommand(input, context); } private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { - return deserializeAws_json1_1UpdateResourceCommand(output, context); + return deserializeAws_restJson1UpdateResourceCommand(output, context); } // Start section: command_body_extra diff --git a/clients/client-lakeformation/src/commands/UpdateTableObjectsCommand.ts b/clients/client-lakeformation/src/commands/UpdateTableObjectsCommand.ts new file mode 100644 index 000000000000..405e3f35f956 --- /dev/null +++ b/clients/client-lakeformation/src/commands/UpdateTableObjectsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { UpdateTableObjectsRequest, UpdateTableObjectsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateTableObjectsCommand, + serializeAws_restJson1UpdateTableObjectsCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateTableObjectsCommandInput extends UpdateTableObjectsRequest {} +export interface UpdateTableObjectsCommandOutput extends UpdateTableObjectsResponse, __MetadataBearer {} + +/** + *

Updates the manifest of Amazon S3 objects that make up the specified governed table.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, UpdateTableObjectsCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, UpdateTableObjectsCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new UpdateTableObjectsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateTableObjectsCommandInput} for command's `input` shape. + * @see {@link UpdateTableObjectsCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class UpdateTableObjectsCommand extends $Command< + UpdateTableObjectsCommandInput, + UpdateTableObjectsCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateTableObjectsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "UpdateTableObjectsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateTableObjectsRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateTableObjectsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateTableObjectsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateTableObjectsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateTableObjectsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/UpdateTableStorageOptimizerCommand.ts b/clients/client-lakeformation/src/commands/UpdateTableStorageOptimizerCommand.ts new file mode 100644 index 000000000000..16baaa10ae05 --- /dev/null +++ b/clients/client-lakeformation/src/commands/UpdateTableStorageOptimizerCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { LakeFormationClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../LakeFormationClient"; +import { 
UpdateTableStorageOptimizerRequest, UpdateTableStorageOptimizerResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateTableStorageOptimizerCommand, + serializeAws_restJson1UpdateTableStorageOptimizerCommand, +} from "../protocols/Aws_restJson1"; + +export interface UpdateTableStorageOptimizerCommandInput extends UpdateTableStorageOptimizerRequest {} +export interface UpdateTableStorageOptimizerCommandOutput + extends UpdateTableStorageOptimizerResponse, + __MetadataBearer {} + +/** + *

Updates the configuration of the storage optimizers for a table.
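Illustration only: a hedged sketch of calling the new storage-optimizer update. Every request member shown here (DatabaseName, TableName, StorageOptimizerConfig and its keys) is an assumption; this patch only adds the command class and the one-line description above.

```ts
import { LakeFormationClient, UpdateTableStorageOptimizerCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// All member names here are assumptions for illustration.
await client.send(
  new UpdateTableStorageOptimizerCommand({
    DatabaseName: "analytics",
    TableName: "sales_governed",
    StorageOptimizerConfig: { COMPACTION: { is_enabled: "true" } },
  })
);
```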

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { LakeFormationClient, UpdateTableStorageOptimizerCommand } from "@aws-sdk/client-lakeformation"; // ES Modules import + * // const { LakeFormationClient, UpdateTableStorageOptimizerCommand } = require("@aws-sdk/client-lakeformation"); // CommonJS import + * const client = new LakeFormationClient(config); + * const command = new UpdateTableStorageOptimizerCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateTableStorageOptimizerCommandInput} for command's `input` shape. + * @see {@link UpdateTableStorageOptimizerCommandOutput} for command's `response` shape. + * @see {@link LakeFormationClientResolvedConfig | config} for LakeFormationClient's `config` shape. + * + */ +export class UpdateTableStorageOptimizerCommand extends $Command< + UpdateTableStorageOptimizerCommandInput, + UpdateTableStorageOptimizerCommandOutput, + LakeFormationClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateTableStorageOptimizerCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: LakeFormationClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "LakeFormationClient"; + const commandName = "UpdateTableStorageOptimizerCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateTableStorageOptimizerRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateTableStorageOptimizerResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateTableStorageOptimizerCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateTableStorageOptimizerCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1UpdateTableStorageOptimizerCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-lakeformation/src/commands/index.ts b/clients/client-lakeformation/src/commands/index.ts index 712d603ee809..1f5d10ed566c 100644 --- a/clients/client-lakeformation/src/commands/index.ts +++ b/clients/client-lakeformation/src/commands/index.ts @@ -1,23 +1,42 @@ export * from "./AddLFTagsToResourceCommand"; export * from "./BatchGrantPermissionsCommand"; export * from "./BatchRevokePermissionsCommand"; +export * from "./CancelTransactionCommand"; +export * from "./CommitTransactionCommand"; +export * from "./CreateDataCellsFilterCommand"; export * from "./CreateLFTagCommand"; +export * from "./DeleteDataCellsFilterCommand"; export * from "./DeleteLFTagCommand"; +export * from "./DeleteObjectsOnCancelCommand"; export * from "./DeregisterResourceCommand"; export * from 
"./DescribeResourceCommand"; +export * from "./DescribeTransactionCommand"; +export * from "./ExtendTransactionCommand"; export * from "./GetDataLakeSettingsCommand"; export * from "./GetEffectivePermissionsForPathCommand"; export * from "./GetLFTagCommand"; +export * from "./GetQueryStateCommand"; +export * from "./GetQueryStatisticsCommand"; export * from "./GetResourceLFTagsCommand"; +export * from "./GetTableObjectsCommand"; +export * from "./GetWorkUnitResultsCommand"; +export * from "./GetWorkUnitsCommand"; export * from "./GrantPermissionsCommand"; +export * from "./ListDataCellsFilterCommand"; export * from "./ListLFTagsCommand"; export * from "./ListPermissionsCommand"; export * from "./ListResourcesCommand"; +export * from "./ListTableStorageOptimizersCommand"; +export * from "./ListTransactionsCommand"; export * from "./PutDataLakeSettingsCommand"; export * from "./RegisterResourceCommand"; export * from "./RemoveLFTagsFromResourceCommand"; export * from "./RevokePermissionsCommand"; export * from "./SearchDatabasesByLFTagsCommand"; export * from "./SearchTablesByLFTagsCommand"; +export * from "./StartQueryPlanningCommand"; +export * from "./StartTransactionCommand"; export * from "./UpdateLFTagCommand"; export * from "./UpdateResourceCommand"; +export * from "./UpdateTableObjectsCommand"; +export * from "./UpdateTableStorageOptimizerCommand"; diff --git a/clients/client-lakeformation/src/models/models_0.ts b/clients/client-lakeformation/src/models/models_0.ts index ce96312f334c..6e1ba98ea7ed 100644 --- a/clients/client-lakeformation/src/models/models_0.ts +++ b/clients/client-lakeformation/src/models/models_0.ts @@ -1,4 +1,6 @@ +import { SENSITIVE_STRING } from "@aws-sdk/smithy-client"; import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; +import { Readable } from "stream"; /** *

Access to a resource was denied.

                                  @@ -22,16 +24,16 @@ export namespace AccessDeniedException { } /** - *

A structure containing a tag key-value pair.
+ * A structure containing an LF-tag key-value pair.
*/ export interface LFTagPair { /** - *
The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.
+ * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.
*/ CatalogId?: string; /** - *
The key-name for the tag.
+ * The key-name for the LF-tag.

                                  */ TagKey: string | undefined; @@ -88,12 +90,46 @@ export namespace DatabaseResource { }); } +/** + *

A structure for a data cells filter resource.
+ */ +export interface DataCellsFilterResource { + /** + *
The ID of the catalog to which the table belongs.
+ */ + TableCatalogId?: string; + + /** + *
A database in the Glue Data Catalog.
+ */ + DatabaseName?: string; + + /** + *
The name of the table.
+ */ + TableName?: string; + + /** + *
The name of the data cells filter.

                                  + */ + Name?: string; +} + +export namespace DataCellsFilterResource { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DataCellsFilterResource): any => ({ + ...obj, + }); +} + /** *

A structure for a data location object where permissions are granted or revoked.
*/ export interface DataLocationResource { /** - *
The identifier for the Data Catalog where the location is registered with AWS Lake Formation. By default, it is the account ID of the caller.
+ * The identifier for the Data Catalog where the location is registered with Lake Formation. By default, it is the account ID of the caller.

                                  */ CatalogId?: string; @@ -113,16 +149,16 @@ export namespace DataLocationResource { } /** - *

A structure containing a tag key and values for a resource.
+ * A structure containing an LF-tag key and values for a resource.
*/ export interface LFTagKeyResource { /** - *
The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.
+ * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.
*/ CatalogId?: string; /** - *
The key-name for the tag.
+ * The key-name for the LF-tag.

                                  */ TagKey: string | undefined; @@ -142,11 +178,11 @@ export namespace LFTagKeyResource { } /** - *

A structure that allows an admin to grant user permissions on certain conditions. For example, granting a role access to all columns not tagged 'PII' of tables tagged 'Prod'.
+ * A structure that allows an admin to grant user permissions on certain conditions. For example, granting a role access to all columns that do not have the LF-tag 'PII' in tables that have the LF-tag 'Prod'.
*/ export interface LFTag { /** - *
The key-name for the tag.
+ * The key-name for the LF-tag.

                                  */ TagKey: string | undefined; @@ -171,21 +207,21 @@ export enum ResourceType { } /** - *

A structure containing a list of tag conditions that apply to a resource's tag policy.
+ * A structure containing a list of LF-tag conditions that apply to a resource's LF-tag policy.
*/ export interface LFTagPolicyResource { /** - *
The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.
+ * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.
*/ CatalogId?: string; /** - *
The resource type for which the tag policy applies.
+ * The resource type for which the LF-tag policy applies.
*/ ResourceType: ResourceType | string | undefined; /** - *
A list of tag conditions that apply to the resource's tag policy.
+ * A list of LF-tag conditions that apply to the resource's LF-tag policy.
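The LF-tag policy resource above is what makes tag-based grants possible; a sketch of granting on an LF-tag expression follows. The Resource.LFTagPolicy shape comes from this patch, while the GrantPermissionsRequest member names (Principal, Resource, Permissions), the literal "TABLE"/"SELECT" values, and TagValues on LFTag (its declaration is truncated in this hunk) are assumptions.

```ts
import { LakeFormationClient, GrantPermissionsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// LFTagPolicy / LFTagPolicyResource members come from this patch; the request member
// names and the literal "TABLE"/"SELECT" values are assumptions for illustration.
await client.send(
  new GrantPermissionsCommand({
    Principal: { DataLakePrincipalIdentifier: "arn:aws:iam::111122223333:role/analysts" },
    Resource: {
      LFTagPolicy: {
        ResourceType: "TABLE",
        Expression: [{ TagKey: "Prod", TagValues: ["true"] }],
      },
    },
    Permissions: ["SELECT"],
  })
);
```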

                                  */ Expression: LFTag[] | undefined; } @@ -313,7 +349,7 @@ export namespace TableWithColumnsResource { */ export interface Resource { /** - *

The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.
+ * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ Catalog?: CatalogResource; @@ -338,12 +374,17 @@ export interface Resource { DataLocation?: DataLocationResource; /** - *

The tag key and values attached to a resource.
+ * A data cell filter.
+ */ + DataCellsFilter?: DataCellsFilterResource; + + /** + *
The LF-tag key and values attached to a resource.
*/ LFTag?: LFTagKeyResource; /** - *
A list of tag conditions that define a resource's tag policy.
+ * A list of LF-tag conditions that define a resource's LF-tag policy.

                                  */ LFTagPolicy?: LFTagPolicyResource; } @@ -359,17 +400,17 @@ export namespace Resource { export interface AddLFTagsToResourceRequest { /** - *

The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.
+ * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.
*/ CatalogId?: string; /** - *
The resource to which to attach a tag.
+ * The database, table, or column resource to which to attach an LF-tag.
*/ Resource: Resource | undefined; /** - *
The tags to attach to the resource.
+ * The LF-tags to attach to the resource.
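A sketch of attaching LF-tags to a resource with the request shape shown here (Resource plus LFTags as LFTagPair entries). The TableResource member names (DatabaseName, Name) and TagValues on LFTagPair are assumptions, since their declarations are not visible in this hunk.

```ts
import { LakeFormationClient, AddLFTagsToResourceCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Resource and LFTags come from AddLFTagsToResourceRequest in this patch; the Table
// member names and TagValues are assumptions for illustration.
await client.send(
  new AddLFTagsToResourceCommand({
    Resource: { Table: { DatabaseName: "analytics", Name: "sales_governed" } },
    LFTags: [{ TagKey: "Prod", TagValues: ["true"] }],
  })
);
```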

                                  */ LFTags: LFTagPair[] | undefined; } @@ -412,12 +453,12 @@ export namespace ErrorDetail { */ export interface LFTagError { /** - *

The key-name of the tag.
+ * The key-name of the LF-tag.
*/ LFTag?: LFTagPair; /** - *
An error that occurred with the attachment or detachment of the tag.
+ * An error that occurred with the attachment or detachment of the LF-tag.

                                  */ Error?: ErrorDetail; } @@ -552,6 +593,55 @@ export namespace OperationTimeoutException { }); } +/** + *

A new object to add to the governed table.
+ */ +export interface AddObjectInput { + /** + *
The Amazon S3 location of the object.
+ */ + Uri: string | undefined; + + /** + *
The Amazon S3 ETag of the object. Returned by GetTableObjects for validation and used to identify changes to the underlying data.
+ */ + ETag: string | undefined; + + /** + *
The size of the Amazon S3 object in bytes.
+ */ + Size: number | undefined; + + /** + *
A list of partition values for the object. A value must be specified for each partition key associated with the table.
+ *
+ * The supported data types are integer, long, date(yyyy-MM-dd), timestamp(yyyy-MM-dd HH:mm:ssXXX or yyyy-MM-dd HH:mm:ss"), string and decimal.
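For reference, a literal that satisfies the AddObjectInput shape added here, assuming the model types are re-exported from the package root as elsewhere in the SDK; the values are placeholders.

```ts
import type { AddObjectInput } from "@aws-sdk/client-lakeformation";

// One PartitionValues entry per partition key on the table, per the description above.
const newObject: AddObjectInput = {
  Uri: "s3://my-bucket/sales/dt=2021-11-30/part-0001.parquet",
  ETag: "5d41402abc4b2a76b9719d911017c592",
  Size: 1048576,
  PartitionValues: ["2021-11-30"],
};
```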

                                  + */ + PartitionValues?: string[]; +} + +export namespace AddObjectInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AddObjectInput): any => ({ + ...obj, + }); +} + +/** + *

A structure that you pass to indicate you want all rows in a filter.

                                  + */ +export interface AllRowsWildcard {} + +export namespace AllRowsWildcard { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AllRowsWildcard): any => ({ + ...obj, + }); +} + /** *

A resource to be created or added already exists.

                                  */ @@ -596,7 +686,7 @@ export enum Permission { */ export interface DataLakePrincipal { /** - *

An identifier for the AWS Lake Formation principal.
+ * An identifier for the Lake Formation principal.

                                  */ DataLakePrincipalIdentifier?: string; } @@ -651,7 +741,7 @@ export namespace BatchPermissionsRequestEntry { export interface BatchGrantPermissionsRequest { /** - *

The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.
+ * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; @@ -712,7 +802,7 @@ export namespace BatchGrantPermissionsResponse { export interface BatchRevokePermissionsRequest { /** - *

The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.
+ * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; @@ -747,535 +837,1525 @@ export namespace BatchRevokePermissionsResponse { }); } -export interface CreateLFTagRequest { - /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  - */ - CatalogId?: string; - +export interface CancelTransactionRequest { /** - *

                                  The key-name for the tag.

                                  + *

                                  The transaction to cancel.

                                  */ - TagKey: string | undefined; - - /** - *

                                  A list of possible values an attribute can take.

                                  - */ - TagValues: string[] | undefined; + TransactionId: string | undefined; } -export namespace CreateLFTagRequest { +export namespace CancelTransactionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: CreateLFTagRequest): any => ({ + export const filterSensitiveLog = (obj: CancelTransactionRequest): any => ({ ...obj, }); } -export interface CreateLFTagResponse {} +export interface CancelTransactionResponse {} -export namespace CreateLFTagResponse { +export namespace CancelTransactionResponse { /** * @internal */ - export const filterSensitiveLog = (obj: CreateLFTagResponse): any => ({ + export const filterSensitiveLog = (obj: CancelTransactionResponse): any => ({ ...obj, }); } /** - *

                                  A resource numerical limit was exceeded.

                                  + *

                                  Contains details about an error related to a transaction commit that was in progress.

                                  */ -export interface ResourceNumberLimitExceededException extends __SmithyException, $MetadataBearer { - name: "ResourceNumberLimitExceededException"; +export interface TransactionCommitInProgressException extends __SmithyException, $MetadataBearer { + name: "TransactionCommitInProgressException"; $fault: "client"; /** - *

                                  A message describing the problem.

                                  + *

                                  A message describing the error.

                                  */ Message?: string; } -export namespace ResourceNumberLimitExceededException { +export namespace TransactionCommitInProgressException { /** * @internal */ - export const filterSensitiveLog = (obj: ResourceNumberLimitExceededException): any => ({ + export const filterSensitiveLog = (obj: TransactionCommitInProgressException): any => ({ ...obj, }); } -export interface DeleteLFTagRequest { - /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  - */ - CatalogId?: string; - +/** + *

                                  Contains details about an error where the specified transaction has already been committed and cannot be used for UpdateTableObjects.

                                  + */ +export interface TransactionCommittedException extends __SmithyException, $MetadataBearer { + name: "TransactionCommittedException"; + $fault: "client"; /** - *

                                  The key-name for the tag to delete.

                                  + *

                                  A message describing the error.

                                  */ - TagKey: string | undefined; + Message?: string; } -export namespace DeleteLFTagRequest { +export namespace TransactionCommittedException { /** * @internal */ - export const filterSensitiveLog = (obj: DeleteLFTagRequest): any => ({ + export const filterSensitiveLog = (obj: TransactionCommittedException): any => ({ ...obj, }); } -export interface DeleteLFTagResponse {} +export interface CommitTransactionRequest { + /** + *

                                  The transaction to commit.

                                  + */ + TransactionId: string | undefined; +} -export namespace DeleteLFTagResponse { +export namespace CommitTransactionRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DeleteLFTagResponse): any => ({ + export const filterSensitiveLog = (obj: CommitTransactionRequest): any => ({ ...obj, }); } -export interface DeregisterResourceRequest { +export enum TransactionStatus { + ABORTED = "ABORTED", + ACTIVE = "ACTIVE", + COMMITTED = "COMMITTED", + COMMIT_IN_PROGRESS = "COMMIT_IN_PROGRESS", +} + +export interface CommitTransactionResponse { /** - *

                                  The Amazon Resource Name (ARN) of the resource that you want to deregister.

                                  + *

                                  The status of the transaction.

                                  */ - ResourceArn: string | undefined; + TransactionStatus?: TransactionStatus | string; } -export namespace DeregisterResourceRequest { +export namespace CommitTransactionResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DeregisterResourceRequest): any => ({ + export const filterSensitiveLog = (obj: CommitTransactionResponse): any => ({ ...obj, }); } -export interface DeregisterResourceResponse {} +/** + *

Contains details about an error related to a transaction that was cancelled.
+ */ +export interface TransactionCanceledException extends __SmithyException, $MetadataBearer { + name: "TransactionCanceledException"; + $fault: "client"; + /** + *
A message describing the error.
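The new transaction error shapes carry a name and an optional Message; a sketch of distinguishing them when a commit fails. That modeled exceptions surface their shape name as err.name at runtime follows the usual SDK v3 convention and is assumed here.

```ts
import { LakeFormationClient, CommitTransactionCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

try {
  await client.send(new CommitTransactionCommand({ TransactionId: "tx-example" }));
} catch (err: any) {
  if (err?.name === "TransactionCanceledException") {
    console.warn("Transaction was cancelled:", err.Message);
  } else if (err?.name === "TransactionCommitInProgressException") {
    console.warn("Another commit is already in progress:", err.Message);
  } else {
    throw err;
  }
}
```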

                                  + */ + Message?: string; +} -export namespace DeregisterResourceResponse { +export namespace TransactionCanceledException { /** * @internal */ - export const filterSensitiveLog = (obj: DeregisterResourceResponse): any => ({ + export const filterSensitiveLog = (obj: TransactionCanceledException): any => ({ ...obj, }); } -export interface DescribeResourceRequest { +/** + *

A PartiQL predicate.
+ */ +export interface RowFilter { /** - *
The resource ARN.
+ * A filter expression.
*/ - ResourceArn: string | undefined; + FilterExpression?: string; + + /** + *
A wildcard for all rows.

                                  + */ + AllRowsWildcard?: AllRowsWildcard; } -export namespace DescribeResourceRequest { +export namespace RowFilter { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeResourceRequest): any => ({ + export const filterSensitiveLog = (obj: RowFilter): any => ({ ...obj, }); } /** - *

A structure containing information about an AWS Lake Formation resource.
+ * A structure that describes certain columns on certain rows.

                                  */ -export interface ResourceInfo { +export interface DataCellsFilter { /** - *

                                  The Amazon Resource Name (ARN) of the resource.

                                  + *

                                  The ID of the catalog to which the table belongs.

                                  */ - ResourceArn?: string; + TableCatalogId: string | undefined; /** - *

                                  The IAM role that registered a resource.

                                  + *

                                  A database in the Glue Data Catalog.

                                  */ - RoleArn?: string; + DatabaseName: string | undefined; /** - *

                                  The date and time the resource was last modified.

                                  + *

                                  A table in the database.

                                  */ - LastModified?: Date; + TableName: string | undefined; + + /** + *

                                  The name given by the user to the data filter cell.

                                  + */ + Name: string | undefined; + + /** + *

                                  A PartiQL predicate.

                                  + */ + RowFilter?: RowFilter; + + /** + *

                                  A list of column names.

                                  + */ + ColumnNames?: string[]; + + /** + *

                                  A wildcard with exclusions.

                                  + */ + ColumnWildcard?: ColumnWildcard; } -export namespace ResourceInfo { +export namespace DataCellsFilter { /** * @internal */ - export const filterSensitiveLog = (obj: ResourceInfo): any => ({ + export const filterSensitiveLog = (obj: DataCellsFilter): any => ({ ...obj, }); } -export interface DescribeResourceResponse { +export interface CreateDataCellsFilterRequest { /** - *

A structure containing information about an AWS Lake Formation resource.
+ * A DataCellsFilter structure containing information about the data cells filter.
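A sketch of creating a data cells filter with the TableData member shown here; the DataCellsFilter and RowFilter members all appear in this patch, and only the literal values are placeholders.

```ts
import { LakeFormationClient, CreateDataCellsFilterCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

await client.send(
  new CreateDataCellsFilterCommand({
    TableData: {
      TableCatalogId: "111122223333",
      DatabaseName: "analytics",
      TableName: "customers",
      Name: "us-rows-no-pii",
      RowFilter: { FilterExpression: "country = 'US'" },
      ColumnNames: ["customer_id", "country", "created_at"],
    },
  })
);
```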

                                  */ - ResourceInfo?: ResourceInfo; + TableData: DataCellsFilter | undefined; } -export namespace DescribeResourceResponse { +export namespace CreateDataCellsFilterRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DescribeResourceResponse): any => ({ + export const filterSensitiveLog = (obj: CreateDataCellsFilterRequest): any => ({ ...obj, }); } -export interface GetDataLakeSettingsRequest { - /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  - */ - CatalogId?: string; -} +export interface CreateDataCellsFilterResponse {} -export namespace GetDataLakeSettingsRequest { +export namespace CreateDataCellsFilterResponse { /** * @internal */ - export const filterSensitiveLog = (obj: GetDataLakeSettingsRequest): any => ({ + export const filterSensitiveLog = (obj: CreateDataCellsFilterResponse): any => ({ ...obj, }); } /** - *

                                  Permissions granted to a principal.

                                  + *

                                  A resource numerical limit was exceeded.

                                  */ -export interface PrincipalPermissions { - /** - *

                                  The principal who is granted permissions.

                                  - */ - Principal?: DataLakePrincipal; - +export interface ResourceNumberLimitExceededException extends __SmithyException, $MetadataBearer { + name: "ResourceNumberLimitExceededException"; + $fault: "client"; /** - *

                                  The permissions that are granted to the principal.

                                  + *

                                  A message describing the problem.

                                  */ - Permissions?: (Permission | string)[]; + Message?: string; } -export namespace PrincipalPermissions { +export namespace ResourceNumberLimitExceededException { /** * @internal */ - export const filterSensitiveLog = (obj: PrincipalPermissions): any => ({ + export const filterSensitiveLog = (obj: ResourceNumberLimitExceededException): any => ({ ...obj, }); } -/** - *

                                  A structure representing a list of AWS Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions.

                                  - */ -export interface DataLakeSettings { - /** - *

                                  A list of AWS Lake Formation principals. Supported principals are IAM users or IAM roles.

                                  - */ - DataLakeAdmins?: DataLakePrincipal[]; - +export interface CreateLFTagRequest { /** - *

                                  A structure representing a list of up to three principal permissions entries for default create database permissions.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ - CreateDatabaseDefaultPermissions?: PrincipalPermissions[]; + CatalogId?: string; /** - *

                                  A structure representing a list of up to three principal permissions entries for default create table permissions.

                                  + *

                                  The key-name for the LF-tag.

                                  */ - CreateTableDefaultPermissions?: PrincipalPermissions[]; + TagKey: string | undefined; /** - *

A list of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). The user ARNs can be logged in the resource owner's AWS CloudTrail log.
* - * You may want to specify this property when you are in a high-trust boundary, such as the same team or company.
+ * A list of possible values an attribute can take.
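A sketch of creating an LF-tag with the CreateLFTagRequest members shown in this hunk (CatalogId is optional and defaults to the account ID); the values are placeholders.

```ts
import { LakeFormationClient, CreateLFTagCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

await client.send(
  new CreateLFTagCommand({
    TagKey: "Prod",
    TagValues: ["true", "false"],
  })
);
```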

                                  */ - TrustedResourceOwners?: string[]; + TagValues: string[] | undefined; } -export namespace DataLakeSettings { +export namespace CreateLFTagRequest { /** * @internal */ - export const filterSensitiveLog = (obj: DataLakeSettings): any => ({ + export const filterSensitiveLog = (obj: CreateLFTagRequest): any => ({ ...obj, }); } -export interface GetDataLakeSettingsResponse { - /** - *

                                  A structure representing a list of AWS Lake Formation principals designated as data lake administrators.

                                  - */ - DataLakeSettings?: DataLakeSettings; -} +export interface CreateLFTagResponse {} -export namespace GetDataLakeSettingsResponse { +export namespace CreateLFTagResponse { /** * @internal */ - export const filterSensitiveLog = (obj: GetDataLakeSettingsResponse): any => ({ + export const filterSensitiveLog = (obj: CreateLFTagResponse): any => ({ ...obj, }); } -export interface GetEffectivePermissionsForPathRequest { +export interface DeleteDataCellsFilterRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The ID of the catalog to which the table belongs.

                                  */ - CatalogId?: string; + TableCatalogId?: string; /** - *

                                  The Amazon Resource Name (ARN) of the resource for which you want to get permissions.

                                  + *

                                  A database in the Glue Data Catalog.

                                  */ - ResourceArn: string | undefined; + DatabaseName?: string; /** - *

                                  A continuation token, if this is not the first call to retrieve this list.

                                  + *

                                  A table in the database.

                                  */ - NextToken?: string; + TableName?: string; /** - *

                                  The maximum number of results to return.

                                  + *

                                  The name given by the user to the data filter cell.

                                  */ - MaxResults?: number; + Name?: string; } -export namespace GetEffectivePermissionsForPathRequest { +export namespace DeleteDataCellsFilterRequest { /** * @internal */ - export const filterSensitiveLog = (obj: GetEffectivePermissionsForPathRequest): any => ({ + export const filterSensitiveLog = (obj: DeleteDataCellsFilterRequest): any => ({ ...obj, }); } -/** - *

                                  A structure containing the additional details to be returned in the AdditionalDetails attribute of PrincipalResourcePermissions.

                                  - * - *

                                  If a catalog resource is shared through AWS Resource Access Manager (AWS RAM), then there will exist a corresponding RAM resource share ARN.

                                  - */ -export interface DetailsMap { - /** - *

                                  A resource share ARN for a catalog resource shared through AWS Resource Access Manager (AWS RAM).

                                  - */ - ResourceShare?: string[]; -} +export interface DeleteDataCellsFilterResponse {} -export namespace DetailsMap { +export namespace DeleteDataCellsFilterResponse { /** * @internal */ - export const filterSensitiveLog = (obj: DetailsMap): any => ({ + export const filterSensitiveLog = (obj: DeleteDataCellsFilterResponse): any => ({ ...obj, }); } -/** - *

                                  The permissions granted or revoked on a resource.

                                  - */ -export interface PrincipalResourcePermissions { +export interface DeleteLFTagRequest { /** - *

                                  The Data Lake principal to be granted or revoked permissions.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ - Principal?: DataLakePrincipal; + CatalogId?: string; /** - *

                                  The resource where permissions are to be granted or revoked.

                                  + *

                                  The key-name for the LF-tag to delete.

                                  */ - Resource?: Resource; + TagKey: string | undefined; +} +export namespace DeleteLFTagRequest { /** - *

                                  The permissions to be granted or revoked on the resource.

                                  + * @internal */ - Permissions?: (Permission | string)[]; - - /** - *

                                  Indicates whether to grant the ability to grant permissions (as a subset of permissions granted).

                                  + export const filterSensitiveLog = (obj: DeleteLFTagRequest): any => ({ + ...obj, + }); +} + +export interface DeleteLFTagResponse {} + +export namespace DeleteLFTagResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteLFTagResponse): any => ({ + ...obj, + }); +} + +/** + *

An object that defines an Amazon S3 object to be deleted if a transaction cancels, provided that VirtualPut was called before writing the object.

                                  + */ +export interface VirtualObject { + /** + *

                                  The path to the Amazon S3 object. Must start with s3://

                                  + */ + Uri: string | undefined; + + /** + *

                                  The ETag of the Amazon S3 object.

                                  + */ + ETag?: string; +} + +export namespace VirtualObject { + /** + * @internal + */ + export const filterSensitiveLog = (obj: VirtualObject): any => ({ + ...obj, + }); +} + +export interface DeleteObjectsOnCancelRequest { + /** + *

                                  The Glue data catalog that contains the governed table. Defaults to the current account ID.

                                  + */ + CatalogId?: string; + + /** + *

                                  The database that contains the governed table.

                                  + */ + DatabaseName: string | undefined; + + /** + *

                                  The name of the governed table.

                                  + */ + TableName: string | undefined; + + /** + *

                                  ID of the transaction that the writes occur in.

                                  + */ + TransactionId: string | undefined; + + /** + *

                                  A list of VirtualObject structures, which indicates the Amazon S3 objects to be deleted if the transaction cancels.

                                  + */ + Objects: VirtualObject[] | undefined; +} + +export namespace DeleteObjectsOnCancelRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteObjectsOnCancelRequest): any => ({ + ...obj, + }); +} + +export interface DeleteObjectsOnCancelResponse {} + +export namespace DeleteObjectsOnCancelResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteObjectsOnCancelResponse): any => ({ + ...obj, + }); +} + +/** + *
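A hedged sketch of how the DeleteObjectsOnCancelRequest shape above might be used during a governed-table write: start a transaction, then register the Amazon S3 objects Lake Formation should delete if that transaction cancels. StartTransactionCommand and DeleteObjectsOnCancelCommand are assumed from the same release (they are not part of this hunk), and all database, table, and S3 names are placeholders.

import {
  LakeFormationClient,
  StartTransactionCommand,
  DeleteObjectsOnCancelCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function registerCleanupObjects(): Promise<void> {
  const { TransactionId } = await client.send(new StartTransactionCommand({}));

  // Register the S3 objects to delete if this transaction is cancelled.
  await client.send(
    new DeleteObjectsOnCancelCommand({
      DatabaseName: "sales_db",            // placeholder database containing the governed table
      TableName: "orders_governed",        // placeholder governed table
      TransactionId: TransactionId!,
      Objects: [
        { Uri: "s3://example-bucket/orders/part-0000.parquet" }, // ETag is optional
      ],
    })
  );
}

registerCleanupObjects().catch(console.error);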

                                  Contains details about an error related to a resource which is not ready for a transaction.

                                  + */ +export interface ResourceNotReadyException extends __SmithyException, $MetadataBearer { + name: "ResourceNotReadyException"; + $fault: "client"; + /** + *

                                  A message describing the error.

                                  + */ + Message?: string; +} + +export namespace ResourceNotReadyException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotReadyException): any => ({ + ...obj, + }); +} + +export interface DeregisterResourceRequest { + /** + *

                                  The Amazon Resource Name (ARN) of the resource that you want to deregister.

                                  + */ + ResourceArn: string | undefined; +} + +export namespace DeregisterResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeregisterResourceRequest): any => ({ + ...obj, + }); +} + +export interface DeregisterResourceResponse {} + +export namespace DeregisterResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeregisterResourceResponse): any => ({ + ...obj, + }); +} + +export interface DescribeResourceRequest { + /** + *

                                  The resource ARN.

                                  + */ + ResourceArn: string | undefined; +} + +export namespace DescribeResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeResourceRequest): any => ({ + ...obj, + }); +} + +/** + *

A structure containing information about a Lake Formation resource.

                                  + */ +export interface ResourceInfo { + /** + *

                                  The Amazon Resource Name (ARN) of the resource.

                                  + */ + ResourceArn?: string; + + /** + *

                                  The IAM role that registered a resource.

                                  + */ + RoleArn?: string; + + /** + *

                                  The date and time the resource was last modified.

                                  + */ + LastModified?: Date; +} + +export namespace ResourceInfo { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceInfo): any => ({ + ...obj, + }); +} + +export interface DescribeResourceResponse { + /** + *

A structure containing information about a Lake Formation resource.

                                  + */ + ResourceInfo?: ResourceInfo; +} + +export namespace DescribeResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeResourceResponse): any => ({ + ...obj, + }); +} + +export interface DescribeTransactionRequest { + /** + *
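A small sketch for the DescribeResource shapes above, assuming the DescribeResourceCommand wrapper that normally accompanies them in @aws-sdk/client-lakeformation; the ARN and region are placeholders.

import { LakeFormationClient, DescribeResourceCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function showResource(resourceArn: string): Promise<void> {
  const { ResourceInfo } = await client.send(
    new DescribeResourceCommand({ ResourceArn: resourceArn })
  );
  // ResourceInfo carries the ARN, the IAM role that registered the resource, and the last-modified time.
  console.log(ResourceInfo?.RoleArn, ResourceInfo?.LastModified);
}

showResource("arn:aws:s3:::example-data-lake-bucket").catch(console.error);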

                                  The transaction for which to return status.

                                  + */ + TransactionId: string | undefined; +} + +export namespace DescribeTransactionRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeTransactionRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains information about a transaction.

                                  + */ +export interface TransactionDescription { + /** + *

                                  The ID of the transaction.

                                  + */ + TransactionId?: string; + + /** + *

                                  A status of ACTIVE, COMMITTED, or ABORTED.

                                  + */ + TransactionStatus?: TransactionStatus | string; + + /** + *

                                  The time when the transaction started.

                                  + */ + TransactionStartTime?: Date; + + /** + *

                                  The time when the transaction committed or aborted, if it is not currently active.

                                  + */ + TransactionEndTime?: Date; +} + +export namespace TransactionDescription { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TransactionDescription): any => ({ + ...obj, + }); +} + +export interface DescribeTransactionResponse { + /** + *

                                  Returns a TransactionDescription object containing information about the transaction.

                                  + */ + TransactionDescription?: TransactionDescription; +} + +export namespace DescribeTransactionResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DescribeTransactionResponse): any => ({ + ...obj, + }); +} + +export interface ExtendTransactionRequest { + /** + *

                                  The transaction to extend.

                                  + */ + TransactionId?: string; +} + +export namespace ExtendTransactionRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExtendTransactionRequest): any => ({ + ...obj, + }); +} + +export interface ExtendTransactionResponse {} + +export namespace ExtendTransactionResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExtendTransactionResponse): any => ({ + ...obj, + }); +} + +export interface GetDataLakeSettingsRequest { + /** + *
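A hedged sketch combining the DescribeTransaction and ExtendTransaction shapes above: check the transaction status, and extend it only while it is still active. The command class names are assumed from the standard v3 pattern; the transaction ID is a placeholder.

import {
  LakeFormationClient,
  DescribeTransactionCommand,
  ExtendTransactionCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function keepTransactionAlive(transactionId: string): Promise<void> {
  const { TransactionDescription } = await client.send(
    new DescribeTransactionCommand({ TransactionId: transactionId })
  );

  // Only an ACTIVE transaction can be extended; COMMITTED and ABORTED are terminal states.
  if (TransactionDescription?.TransactionStatus === "ACTIVE") {
    await client.send(new ExtendTransactionCommand({ TransactionId: transactionId }));
  }
}

keepTransactionAlive("example-transaction-id").catch(console.error);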

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  + */ + CatalogId?: string; +} + +export namespace GetDataLakeSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDataLakeSettingsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Permissions granted to a principal.

                                  + */ +export interface PrincipalPermissions { + /** + *

                                  The principal who is granted permissions.

                                  + */ + Principal?: DataLakePrincipal; + + /** + *

                                  The permissions that are granted to the principal.

                                  + */ + Permissions?: (Permission | string)[]; +} + +export namespace PrincipalPermissions { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PrincipalPermissions): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure representing a list of Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions.

                                  + */ +export interface DataLakeSettings { + /** + *

                                  A list of Lake Formation principals. Supported principals are IAM users or IAM roles.

                                  + */ + DataLakeAdmins?: DataLakePrincipal[]; + + /** + *

                                  Specifies whether access control on newly created database is managed by Lake Formation permissions or exclusively by IAM permissions. You can override this default setting when you create a database.

                                  + * + *

                                  A null value indicates access control by Lake Formation permissions. A value that assigns ALL to IAM_ALLOWED_PRINCIPALS indicates access control by IAM permissions. This is referred to as the setting "Use only IAM access control," and is for backward compatibility with the Glue permission model implemented by IAM permissions.

                                  + * + *

                                  The only permitted values are an empty array or an array that contains a single JSON object that grants ALL to IAM_ALLOWED_PRINCIPALS.

                                  + * + *

                                  For more information, see Changing the Default Security Settings for Your Data Lake.

                                  + */ + CreateDatabaseDefaultPermissions?: PrincipalPermissions[]; + + /** + *

                                  Specifies whether access control on newly created table is managed by Lake Formation permissions or exclusively by IAM permissions.

                                  + * + *

                                  A null value indicates access control by Lake Formation permissions. A value that assigns ALL to IAM_ALLOWED_PRINCIPALS indicates access control by IAM permissions. This is referred to as the setting "Use only IAM access control," and is for backward compatibility with the Glue permission model implemented by IAM permissions.

                                  + * + *

                                  The only permitted values are an empty array or an array that contains a single JSON object that grants ALL to IAM_ALLOWED_PRINCIPALS.

                                  + * + *

                                  For more information, see Changing the Default Security Settings for Your Data Lake.

                                  + */ + CreateTableDefaultPermissions?: PrincipalPermissions[]; + + /** + *

                                  A list of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). The user ARNs can be logged in the resource owner's CloudTrail log.

                                  + * + *

                                  You may want to specify this property when you are in a high-trust boundary, such as the same team or company.

                                  + */ + TrustedResourceOwners?: string[]; +} + +export namespace DataLakeSettings { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DataLakeSettings): any => ({ + ...obj, + }); +} + +export interface GetDataLakeSettingsResponse { + /** + *

                                  A structure representing a list of Lake Formation principals designated as data lake administrators.

                                  + */ + DataLakeSettings?: DataLakeSettings; +} + +export namespace GetDataLakeSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetDataLakeSettingsResponse): any => ({ + ...obj, + }); +} + +export interface GetEffectivePermissionsForPathRequest { + /** + *
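A minimal sketch for GetDataLakeSettingsRequest / DataLakeSettings above, assuming the GetDataLakeSettingsCommand wrapper; the region is a placeholder. It prints the data lake administrators and the default create-database permissions described in the comments above.

import { LakeFormationClient, GetDataLakeSettingsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listDataLakeAdmins(): Promise<void> {
  // CatalogId defaults to the caller's account ID when omitted.
  const { DataLakeSettings } = await client.send(new GetDataLakeSettingsCommand({}));

  for (const admin of DataLakeSettings?.DataLakeAdmins ?? []) {
    console.log("data lake admin:", admin.DataLakePrincipalIdentifier);
  }

  // An empty array means new databases are governed by Lake Formation permissions;
  // a single ALL -> IAM_ALLOWED_PRINCIPALS entry means "use only IAM access control".
  console.log(DataLakeSettings?.CreateDatabaseDefaultPermissions);
}

listDataLakeAdmins().catch(console.error);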

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  + */ + CatalogId?: string; + + /** + *

                                  The Amazon Resource Name (ARN) of the resource for which you want to get permissions.

                                  + */ + ResourceArn: string | undefined; + + /** + *

                                  A continuation token, if this is not the first call to retrieve this list.

                                  + */ + NextToken?: string; + + /** + *

                                  The maximum number of results to return.

                                  + */ + MaxResults?: number; +} + +export namespace GetEffectivePermissionsForPathRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEffectivePermissionsForPathRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure containing the additional details to be returned in the AdditionalDetails attribute of PrincipalResourcePermissions.

                                  + * + *

                                  If a catalog resource is shared through Resource Access Manager (RAM), then there will exist a corresponding RAM resource share ARN.

                                  + */ +export interface DetailsMap { + /** + *

                                  A resource share ARN for a catalog resource shared through RAM.

                                  + */ + ResourceShare?: string[]; +} + +export namespace DetailsMap { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DetailsMap): any => ({ + ...obj, + }); +} + +/** + *

                                  The permissions granted or revoked on a resource.

                                  + */ +export interface PrincipalResourcePermissions { + /** + *

                                  The Data Lake principal to be granted or revoked permissions.

                                  + */ + Principal?: DataLakePrincipal; + + /** + *

                                  The resource where permissions are to be granted or revoked.

                                  + */ + Resource?: Resource; + + /** + *

                                  The permissions to be granted or revoked on the resource.

                                  + */ + Permissions?: (Permission | string)[]; + + /** + *

                                  Indicates whether to grant the ability to grant permissions (as a subset of permissions granted).

                                  + */ + PermissionsWithGrantOption?: (Permission | string)[]; + + /** + *

                                  This attribute can be used to return any additional details of PrincipalResourcePermissions. Currently returns only as a RAM resource share ARN.

                                  + */ + AdditionalDetails?: DetailsMap; +} + +export namespace PrincipalResourcePermissions { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PrincipalResourcePermissions): any => ({ + ...obj, + }); +} + +export interface GetEffectivePermissionsForPathResponse { + /** + *

                                  A list of the permissions for the specified table or database resource located at the path in Amazon S3.

                                  + */ + Permissions?: PrincipalResourcePermissions[]; + + /** + *

                                  A continuation token, if this is not the first call to retrieve this list.

                                  + */ + NextToken?: string; +} + +export namespace GetEffectivePermissionsForPathResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetEffectivePermissionsForPathResponse): any => ({ + ...obj, + }); +} + +export interface GetLFTagRequest { + /** + *
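A hedged sketch of paging through GetEffectivePermissionsForPath using the NextToken/MaxResults fields above, assuming the GetEffectivePermissionsForPathCommand wrapper; the S3 path ARN and region are placeholders.

import {
  LakeFormationClient,
  GetEffectivePermissionsForPathCommand,
  PrincipalResourcePermissions,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function collectPathPermissions(resourceArn: string): Promise<PrincipalResourcePermissions[]> {
  const all: PrincipalResourcePermissions[] = [];
  let NextToken: string | undefined;

  do {
    const page = await client.send(
      new GetEffectivePermissionsForPathCommand({
        ResourceArn: resourceArn,
        MaxResults: 50,
        NextToken,
      })
    );
    all.push(...(page.Permissions ?? []));
    NextToken = page.NextToken; // undefined once the last page has been returned
  } while (NextToken);

  return all;
}

collectPathPermissions("arn:aws:s3:::example-data-lake-bucket/sales/").then((perms) =>
  console.log(`found ${perms.length} permission entries`)
);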

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  + */ + CatalogId?: string; + + /** + *

                                  The key-name for the LF-tag.

                                  + */ + TagKey: string | undefined; +} + +export namespace GetLFTagRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetLFTagRequest): any => ({ + ...obj, + }); +} + +export interface GetLFTagResponse { + /** + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  + */ + CatalogId?: string; + + /** + *

                                  The key-name for the LF-tag.

                                  + */ + TagKey?: string; + + /** + *

                                  A list of possible values an attribute can take.

                                  + */ + TagValues?: string[]; +} + +export namespace GetLFTagResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetLFTagResponse): any => ({ + ...obj, + }); +} + +export interface GetQueryStateRequest { + /** + *

                                  The ID of the plan query operation.

                                  + */ + QueryId: string | undefined; +} + +export namespace GetQueryStateRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryStateRequest): any => ({ + ...obj, + }); +} + +export enum QueryStateString { + ERROR = "ERROR", + EXPIRED = "EXPIRED", + FINISHED = "FINISHED", + PENDING = "PENDING", + WORKUNITS_AVAILABLE = "WORKUNITS_AVAILABLE", +} + +/** + *

                                  A structure for the output.

                                  + */ +export interface GetQueryStateResponse { + /** + *

                                  An error message when the operation fails.

                                  + */ + Error?: string; + + /** + *

                                  The state of a query previously submitted. The possible states are:

                                  + * + *
                                    + *
                                  • + *

                                    - PENDING: the query is pending.
                                    - WORKUNITS_AVAILABLE: some work units are ready for retrieval and execution.
                                    - FINISHED: the query planning finished successfully, and all work units are ready for retrieval and execution.
                                    - ERROR: an error occurred with the query, such as an invalid query ID or a backend error.
                                  + */ + State: QueryStateString | string | undefined; +} + +export namespace GetQueryStateResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryStateResponse): any => ({ + ...obj, + }); +} + +/** + *
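A polling sketch built on the GetQueryState states listed above, assuming the GetQueryStateCommand wrapper; the query ID and region are placeholders, and the one-second delay is arbitrary.

import { LakeFormationClient, GetQueryStateCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// Poll GetQueryState until planning either produces work units or terminates.
async function waitForQuery(queryId: string): Promise<string> {
  for (;;) {
    const { State, Error: errorMessage } = await client.send(
      new GetQueryStateCommand({ QueryId: queryId })
    );

    if (State === "FINISHED" || State === "WORKUNITS_AVAILABLE") {
      return State;
    }
    if (State === "ERROR" || State === "EXPIRED") {
      throw new Error(`query ${queryId} failed: ${errorMessage ?? State}`);
    }
    // State is PENDING; wait briefly before polling again.
    await new Promise((resolve) => setTimeout(resolve, 1000));
  }
}

waitForQuery("example-query-id").then(console.log).catch(console.error);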

                                  Contains details about an error where the query request expired.

                                  + */ +export interface ExpiredException extends __SmithyException, $MetadataBearer { + name: "ExpiredException"; + $fault: "client"; + /** + *

                                  A message describing the error.

                                  + */ + Message?: string; +} + +export namespace ExpiredException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ExpiredException): any => ({ + ...obj, + }); +} + +export interface GetQueryStatisticsRequest { + /** + *

                                  The ID of the plan query operation.

                                  + */ + QueryId: string | undefined; +} + +export namespace GetQueryStatisticsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryStatisticsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Statistics related to the processing of a query statement.

                                  + */ +export interface ExecutionStatistics { + /** + *

                                  The average time the request took to be executed.

                                  */ - PermissionsWithGrantOption?: (Permission | string)[]; + AverageExecutionTimeMillis?: number; /** - *

                                  This attribute can be used to return any additional details of PrincipalResourcePermissions. Currently returns only as a RAM resource share ARN.

                                  + *

                                  The amount of data that was scanned in bytes.

                                  */ - AdditionalDetails?: DetailsMap; + DataScannedBytes?: number; + + /** + *

                                  The number of work units executed.

                                  + */ + WorkUnitsExecutedCount?: number; } -export namespace PrincipalResourcePermissions { +export namespace ExecutionStatistics { /** * @internal */ - export const filterSensitiveLog = (obj: PrincipalResourcePermissions): any => ({ + export const filterSensitiveLog = (obj: ExecutionStatistics): any => ({ ...obj, }); } -export interface GetEffectivePermissionsForPathResponse { +/** + *

                                  Statistics related to the processing of a query statement.

                                  + */ +export interface PlanningStatistics { /** - *

                                  A list of the permissions for the specified table or database resource located at the path in Amazon S3.

                                  + *

                                  An estimate of the data that was scanned in bytes.

                                  */ - Permissions?: PrincipalResourcePermissions[]; + EstimatedDataToScanBytes?: number; /** - *

                                  A continuation token, if this is not the first call to retrieve this list.

                                  + *

                                  The time that it took to process the request.

                                  */ - NextToken?: string; + PlanningTimeMillis?: number; + + /** + *

                                  The time the request was in queue to be processed.

                                  + */ + QueueTimeMillis?: number; + + /** + *

                                  The number of work units generated.

                                  + */ + WorkUnitsGeneratedCount?: number; } -export namespace GetEffectivePermissionsForPathResponse { +export namespace PlanningStatistics { /** * @internal */ - export const filterSensitiveLog = (obj: GetEffectivePermissionsForPathResponse): any => ({ + export const filterSensitiveLog = (obj: PlanningStatistics): any => ({ ...obj, }); } -export interface GetLFTagRequest { +export interface GetQueryStatisticsResponse { + /** + *

                                  An ExecutionStatistics structure containing execution statistics.

                                  + */ + ExecutionStatistics?: ExecutionStatistics; + + /** + *

                                  A PlanningStatistics structure containing query planning statistics.

                                  + */ + PlanningStatistics?: PlanningStatistics; + + /** + *

                                  The time that the query was submitted.

                                  + */ + QuerySubmissionTime?: Date; +} + +export namespace GetQueryStatisticsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetQueryStatisticsResponse): any => ({ + ...obj, + }); +} + +/** + *
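A hedged sketch for GetQueryStatistics and the StatisticsNotReadyYetException declared just below it: fetch planning and execution statistics, retrying while they are not ready yet. The command name, retry count, and delay are assumptions; the query ID is a placeholder.

import { LakeFormationClient, GetQueryStatisticsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function printQueryStatistics(queryId: string): Promise<void> {
  for (let attempt = 0; attempt < 5; attempt++) {
    try {
      const stats = await client.send(new GetQueryStatisticsCommand({ QueryId: queryId }));
      console.log("estimated bytes to scan:", stats.PlanningStatistics?.EstimatedDataToScanBytes);
      console.log("bytes scanned:", stats.ExecutionStatistics?.DataScannedBytes);
      console.log("work units executed:", stats.ExecutionStatistics?.WorkUnitsExecutedCount);
      return;
    } catch (err) {
      // Statistics are produced asynchronously; retry while they are not ready yet.
      if ((err as { name?: string }).name === "StatisticsNotReadyYetException") {
        await new Promise((resolve) => setTimeout(resolve, 2000));
        continue;
      }
      throw err;
    }
  }
}

printQueryStatistics("example-query-id").catch(console.error);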

                                  Contains details about an error related to statistics not being ready.

                                  + */ +export interface StatisticsNotReadyYetException extends __SmithyException, $MetadataBearer { + name: "StatisticsNotReadyYetException"; + $fault: "client"; + /** + *

                                  A message describing the error.

                                  + */ + Message?: string; +} + +export namespace StatisticsNotReadyYetException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: StatisticsNotReadyYetException): any => ({ + ...obj, + }); +} + +/** + *

                                  Contains details about an error where the query request was throttled.

                                  + */ +export interface ThrottledException extends __SmithyException, $MetadataBearer { + name: "ThrottledException"; + $fault: "client"; + $retryable: { + throttling: true; + }; /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  A message describing the error.

                                  + */ + Message?: string; +} + +export namespace ThrottledException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ThrottledException): any => ({ + ...obj, + }); +} + +export interface GetResourceLFTagsRequest { + /** + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; /** - *

                                  The key-name for the tag.

                                  + *

                                  The database, table, or column resource for which you want to return LF-tags.

                                  */ - TagKey: string | undefined; + Resource: Resource | undefined; + + /** + *

                                  Indicates whether to show the assigned LF-tags.

                                  + */ + ShowAssignedLFTags?: boolean; } -export namespace GetLFTagRequest { +export namespace GetResourceLFTagsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: GetLFTagRequest): any => ({ + export const filterSensitiveLog = (obj: GetResourceLFTagsRequest): any => ({ ...obj, }); } -export interface GetLFTagResponse { +/** + *

                                  A structure containing the name of a column resource and the LF-tags attached to it.

                                  + */ +export interface ColumnLFTag { + /** + *

                                  The name of a column resource.

                                  + */ + Name?: string; + + /** + *

                                  The LF-tags attached to a column resource.

                                  + */ + LFTags?: LFTagPair[]; +} + +export namespace ColumnLFTag { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ColumnLFTag): any => ({ + ...obj, + }); +} + +export interface GetResourceLFTagsResponse { + /** + *

                                  A list of LF-tags applied to a database resource.

                                  + */ + LFTagOnDatabase?: LFTagPair[]; + + /** + *

                                  A list of LF-tags applied to a table resource.

                                  + */ + LFTagsOnTable?: LFTagPair[]; + + /** + *

                                  A list of LF-tags applied to a column resource.

                                  + */ + LFTagsOnColumns?: ColumnLFTag[]; +} + +export namespace GetResourceLFTagsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetResourceLFTagsResponse): any => ({ + ...obj, + }); +} + +/** + *
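A minimal sketch for GetResourceLFTags and the ColumnLFTag shape above, assuming the GetResourceLFTagsCommand wrapper; the database and table names are placeholders.

import { LakeFormationClient, GetResourceLFTagsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function showTableLFTags(databaseName: string, tableName: string): Promise<void> {
  const result = await client.send(
    new GetResourceLFTagsCommand({
      Resource: {
        Table: { DatabaseName: databaseName, Name: tableName },
      },
      ShowAssignedLFTags: true, // include LF-tags assigned directly to the resource
    })
  );

  console.log("table LF-tags:", result.LFTagsOnTable);
  for (const column of result.LFTagsOnColumns ?? []) {
    console.log(`column ${column.Name}:`, column.LFTags);
  }
}

showTableLFTags("sales_db", "orders").catch(console.error);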

                                  An encryption operation failed.

                                  + */ +export interface GlueEncryptionException extends __SmithyException, $MetadataBearer { + name: "GlueEncryptionException"; + $fault: "client"; + /** + *

                                  A message describing the problem.

                                  + */ + Message?: string; +} + +export namespace GlueEncryptionException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GlueEncryptionException): any => ({ + ...obj, + }); +} + +export interface GetTableObjectsRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The catalog containing the governed table. Defaults to the caller’s account.

                                  */ CatalogId?: string; /** - *

                                  The key-name for the tag.

                                  + *

                                  The database containing the governed table.

                                  */ - TagKey?: string; + DatabaseName: string | undefined; /** - *

                                  A list of possible values an attribute can take.

                                  + *

                                  The governed table for which to retrieve objects.

                                  */ - TagValues?: string[]; + TableName: string | undefined; + + /** + *

                                  The transaction ID at which to read the governed table contents. If this transaction has aborted, an error is returned. If not set, defaults to the most recent committed transaction. Cannot be specified along with QueryAsOfTime.

                                  + */ + TransactionId?: string; + + /** + *

                                  The time as of when to read the governed table contents. If not set, the most recent transaction commit time is used. Cannot be specified along with TransactionId.

                                  + */ + QueryAsOfTime?: Date; + + /** + *

                                  A predicate to filter the objects returned based on the partition keys defined in the governed table.

                                  + *
                                    + *
                                  • + *

                                    - The comparison operators supported are: =, >, <, >=, <=
                                    - The logical operators supported are: AND
                                    - The data types supported are integer, long, date(yyyy-MM-dd), timestamp(yyyy-MM-dd HH:mm:ssXXX or yyyy-MM-dd HH:mm:ss"), string and decimal.
                                  + */ + PartitionPredicate?: string; + + /** + *

                                  Specifies how many values to return in a page.

                                  + */ + MaxResults?: number; + + /** + *

                                  A continuation token if this is not the first call to retrieve these objects.

                                  + */ + NextToken?: string; +} + +export namespace GetTableObjectsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTableObjectsRequest): any => ({ + ...obj, + }); +} + +/** + *
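A hedged sketch for GetTableObjectsRequest above, assuming the GetTableObjectsCommand wrapper. The partition predicate, database, and table names are illustrative placeholders (the predicate only uses the operators and types listed above); TransactionId and QueryAsOfTime are mutually exclusive, so only one is passed.

import { LakeFormationClient, GetTableObjectsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listGovernedObjects(databaseName: string, tableName: string): Promise<void> {
  const result = await client.send(
    new GetTableObjectsCommand({
      DatabaseName: databaseName,
      TableName: tableName,
      QueryAsOfTime: new Date(), // read as of the latest committed transaction instead of a TransactionId
      PartitionPredicate: "sale_date >= '2021-01-01' AND region = 'us-west-2'", // placeholder predicate
      MaxResults: 100,
    })
  );

  for (const partition of result.Objects ?? []) {
    console.log("partition:", partition.PartitionValues);
    for (const obj of partition.Objects ?? []) {
      console.log(`  ${obj.Uri} (${obj.Size ?? 0} bytes, ETag ${obj.ETag})`);
    }
  }
}

listGovernedObjects("sales_db", "orders_governed").catch(console.error);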

                                  Specifies the details of a governed table.

                                  + */ +export interface TableObject { + /** + *

                                  The Amazon S3 location of the object.

                                  + */ + Uri?: string; + + /** + *

                                  The Amazon S3 ETag of the object. Returned by GetTableObjects for validation and used to identify changes to the underlying data.

                                  + */ + ETag?: string; + + /** + *

                                  The size of the Amazon S3 object in bytes.

                                  + */ + Size?: number; +} + +export namespace TableObject { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TableObject): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure containing a list of partition values and table objects.

                                  + */ +export interface PartitionObjects { + /** + *

                                  A list of partition values.

                                  + */ + PartitionValues?: string[]; + + /** + *

A list of table objects.

                                  + */ + Objects?: TableObject[]; +} + +export namespace PartitionObjects { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PartitionObjects): any => ({ + ...obj, + }); +} + +export interface GetTableObjectsResponse { + /** + *

                                  A list of objects organized by partition keys.

                                  + */ + Objects?: PartitionObjects[]; + + /** + *

                                  A continuation token indicating whether additional data is available.

                                  + */ + NextToken?: string; +} + +export namespace GetTableObjectsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTableObjectsResponse): any => ({ + ...obj, + }); +} + +export interface GetWorkUnitResultsRequest { + /** + *

                                  The ID of the plan query operation for which to get results.

                                  + */ + QueryId: string | undefined; + + /** + *

                                  The work unit ID for which to get results. Value generated by enumerating WorkUnitIdMin to WorkUnitIdMax (inclusive) from the WorkUnitRange in the output of GetWorkUnits.

                                  + */ + WorkUnitId: number | undefined; + + /** + *

                                  A work token used to query the execution service. Token output from GetWorkUnits.

                                  + */ + WorkUnitToken: string | undefined; +} + +export namespace GetWorkUnitResultsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetWorkUnitResultsRequest): any => ({ + ...obj, + ...(obj.WorkUnitToken && { WorkUnitToken: SENSITIVE_STRING }), + }); +} + +/** + *

                                  A structure for the output.

                                  + */ +export interface GetWorkUnitResultsResponse { + /** + *

                                  Rows returned from the GetWorkUnitResults operation as a stream of Apache Arrow v1.0 messages.

                                  + */ + ResultStream?: Readable | ReadableStream | Blob; } -export namespace GetLFTagResponse { +export namespace GetWorkUnitResultsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: GetLFTagResponse): any => ({ + export const filterSensitiveLog = (obj: GetWorkUnitResultsResponse): any => ({ ...obj, }); } -export interface GetResourceLFTagsRequest { +export interface GetWorkUnitsRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  A continuation token, if this is a continuation call.

                                  */ - CatalogId?: string; + NextToken?: string; /** - *

                                  The resource for which you want to return tags.

                                  + *

                                  The size of each page to get in the Amazon Web Services service call. This does not affect the number of items returned in the command's output. Setting a smaller page size results in more calls to the Amazon Web Services service, retrieving fewer items in each call. This can help prevent the Amazon Web Services service calls from timing out.

                                  */ - Resource: Resource | undefined; + PageSize?: number; /** - *

                                  Indicates whether to show the assigned tags.

                                  + *

                                  The ID of the plan query operation.

                                  */ - ShowAssignedLFTags?: boolean; + QueryId: string | undefined; } -export namespace GetResourceLFTagsRequest { +export namespace GetWorkUnitsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: GetResourceLFTagsRequest): any => ({ + export const filterSensitiveLog = (obj: GetWorkUnitsRequest): any => ({ ...obj, }); } /** - *

                                  A structure containing the name of a column resource and the tags attached to it.

                                  + *

                                  Defines the valid range of work unit IDs for querying the execution service.

                                  */ -export interface ColumnLFTag { +export interface WorkUnitRange { /** - *

                                  The name of a column resource.

                                  + *

                                  Defines the maximum work unit ID in the range. The maximum value is inclusive.

                                  */ - Name?: string; + WorkUnitIdMax: number | undefined; /** - *

                                  The tags attached to a column resource.

                                  + *

                                  Defines the minimum work unit ID in the range.

                                  */ - LFTags?: LFTagPair[]; + WorkUnitIdMin: number | undefined; + + /** + *

                                  A work token used to query the execution service.

                                  + */ + WorkUnitToken: string | undefined; } -export namespace ColumnLFTag { +export namespace WorkUnitRange { /** * @internal */ - export const filterSensitiveLog = (obj: ColumnLFTag): any => ({ + export const filterSensitiveLog = (obj: WorkUnitRange): any => ({ ...obj, }); } -export interface GetResourceLFTagsResponse { +/** + *

                                  A structure for the output.

                                  + */ +export interface GetWorkUnitsResponse { /** - *

                                  A list of tags applied to a database resource.

                                  + *

                                  A continuation token for paginating the returned list of tokens, returned if the current segment of the list is not the last.

                                  */ - LFTagOnDatabase?: LFTagPair[]; + NextToken?: string; /** - *

                                  A list of tags applied to a table resource.

                                  + *

                                  The ID of the plan query operation.

                                  */ - LFTagsOnTable?: LFTagPair[]; + QueryId: string | undefined; /** - *

                                  A list of tags applied to a column resource.

                                  + *

                                  A WorkUnitRangeList object that specifies the valid range of work unit IDs for querying the execution service.

                                  */ - LFTagsOnColumns?: ColumnLFTag[]; + WorkUnitRanges: WorkUnitRange[] | undefined; } -export namespace GetResourceLFTagsResponse { +export namespace GetWorkUnitsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: GetResourceLFTagsResponse): any => ({ + export const filterSensitiveLog = (obj: GetWorkUnitsResponse): any => ({ ...obj, }); } /** - *
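A hedged sketch tying together GetWorkUnits and GetWorkUnitResults above: page through the work unit ranges, enumerate every work unit ID from WorkUnitIdMin to WorkUnitIdMax (inclusive), and fetch its results with the token from the matching range. The command names follow the standard v3 pattern; decoding the Apache Arrow ResultStream is omitted, and the query ID is a placeholder.

import {
  LakeFormationClient,
  GetWorkUnitsCommand,
  GetWorkUnitResultsCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function fetchAllWorkUnitResults(queryId: string): Promise<void> {
  let nextToken: string | undefined;

  do {
    const page = await client.send(
      new GetWorkUnitsCommand({ QueryId: queryId, NextToken: nextToken })
    );

    for (const range of page.WorkUnitRanges ?? []) {
      for (let id = range.WorkUnitIdMin!; id <= range.WorkUnitIdMax!; id++) {
        const result = await client.send(
          new GetWorkUnitResultsCommand({
            QueryId: queryId,
            WorkUnitId: id,
            WorkUnitToken: range.WorkUnitToken!, // token comes from the matching WorkUnitRange
          })
        );
        console.log(`work unit ${id}:`, result.ResultStream ? "stream received" : "empty");
      }
    }

    nextToken = page.NextToken;
  } while (nextToken);
}

fetchAllWorkUnitResults("example-query-id").catch(console.error);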

                                  An encryption operation failed.

                                  + *

                                  Contains details about an error related to work units not being ready.

                                  */ -export interface GlueEncryptionException extends __SmithyException, $MetadataBearer { - name: "GlueEncryptionException"; +export interface WorkUnitsNotReadyYetException extends __SmithyException, $MetadataBearer { + name: "WorkUnitsNotReadyYetException"; $fault: "client"; /** - *

                                  A message describing the problem.

                                  + *

                                  A message describing the error.

                                  */ Message?: string; } -export namespace GlueEncryptionException { +export namespace WorkUnitsNotReadyYetException { /** * @internal */ - export const filterSensitiveLog = (obj: GlueEncryptionException): any => ({ + export const filterSensitiveLog = (obj: WorkUnitsNotReadyYetException): any => ({ ...obj, }); } export interface GrantPermissionsRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; @@ -1286,12 +2366,12 @@ export interface GrantPermissionsRequest { Principal: DataLakePrincipal | undefined; /** - *

                                  The resource to which permissions are to be granted. Resources in AWS Lake Formation are the Data Catalog, databases, and tables.

                                  + *

                                  The resource to which permissions are to be granted. Resources in Lake Formation are the Data Catalog, databases, and tables.

                                  */ Resource: Resource | undefined; /** - *

                                  The permissions granted to the principal on the resource. AWS Lake Formation defines privileges to grant and revoke access to metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3. AWS Lake Formation requires that each principal be authorized to perform a specific task on AWS Lake Formation resources.

                                  + *

                                  The permissions granted to the principal on the resource. Lake Formation defines privileges to grant and revoke access to metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3. Lake Formation requires that each principal be authorized to perform a specific task on Lake Formation resources.

                                  */ Permissions: (Permission | string)[] | undefined; @@ -1321,6 +2401,53 @@ export namespace GrantPermissionsResponse { }); } +export interface ListDataCellsFilterRequest { + /** + *

                                  A table in the Glue Data Catalog.

                                  + */ + Table?: TableResource; + + /** + *

                                  A continuation token, if this is a continuation call.

                                  + */ + NextToken?: string; + + /** + *

                                  The maximum size of the response.

                                  + */ + MaxResults?: number; +} + +export namespace ListDataCellsFilterRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListDataCellsFilterRequest): any => ({ + ...obj, + }); +} + +export interface ListDataCellsFilterResponse { + /** + *

                                  A list of DataCellFilter structures.

                                  + */ + DataCellsFilters?: DataCellsFilter[]; + + /** + *

                                  A continuation token, if not all requested data cell filters have been returned.

                                  + */ + NextToken?: string; +} + +export namespace ListDataCellsFilterResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListDataCellsFilterResponse): any => ({ + ...obj, + }); +} + export enum ResourceShareType { ALL = "ALL", FOREIGN = "FOREIGN", @@ -1328,12 +2455,12 @@ export enum ResourceShareType { export interface ListLFTagsRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; /** - *

                                  If resource share type is ALL, returns both in-account tags and shared tags that the requester has permission to view. If resource share type is FOREIGN, returns all share tags that the requester can view. If no resource share type is passed, lists tags in the given catalog ID that the requester has permission to view.

                                  + *

                                  If resource share type is ALL, returns both in-account LF-tags and shared LF-tags that the requester has permission to view. If resource share type is FOREIGN, returns all share LF-tags that the requester can view. If no resource share type is passed, lists LF-tags in the given catalog ID that the requester has permission to view.

                                  */ ResourceShareType?: ResourceShareType | string; @@ -1359,7 +2486,7 @@ export namespace ListLFTagsRequest { export interface ListLFTagsResponse { /** - *

                                  A list of tags that the requested has permission to view.

                                  + *

A list of LF-tags that the requester has permission to view.

                                  */ LFTags?: LFTagPair[]; @@ -1391,7 +2518,7 @@ export enum DataLakeResourceType { export interface ListPermissionsRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; @@ -1403,159 +2530,334 @@ export interface ListPermissionsRequest { /** *

                                  Specifies a resource type to filter the permissions returned.

                                  */ - ResourceType?: DataLakeResourceType | string; + ResourceType?: DataLakeResourceType | string; + + /** + *

                                  A resource where you will get a list of the principal permissions.

                                  + *

This operation does not support getting privileges on a table with columns. Instead, call this operation on the table, and the operation returns the table and the table with columns.

                                  + */ + Resource?: Resource; + + /** + *

                                  A continuation token, if this is not the first call to retrieve this list.

                                  + */ + NextToken?: string; + + /** + *

                                  The maximum number of results to return.

                                  + */ + MaxResults?: number; + + /** + *

                                  Indicates that related permissions should be included in the results.

                                  + */ + IncludeRelated?: string; +} + +export namespace ListPermissionsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPermissionsRequest): any => ({ + ...obj, + }); +} + +export interface ListPermissionsResponse { + /** + *

                                  A list of principals and their permissions on the resource for the specified principal and resource types.

                                  + */ + PrincipalResourcePermissions?: PrincipalResourcePermissions[]; + + /** + *

                                  A continuation token, if this is not the first call to retrieve this list.

                                  + */ + NextToken?: string; +} + +export namespace ListPermissionsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPermissionsResponse): any => ({ + ...obj, + }); +} + +export enum ComparisonOperator { + BEGINS_WITH = "BEGINS_WITH", + BETWEEN = "BETWEEN", + CONTAINS = "CONTAINS", + EQ = "EQ", + GE = "GE", + GT = "GT", + IN = "IN", + LE = "LE", + LT = "LT", + NE = "NE", + NOT_CONTAINS = "NOT_CONTAINS", +} + +export enum FieldNameString { + LAST_MODIFIED = "LAST_MODIFIED", + RESOURCE_ARN = "RESOURCE_ARN", + ROLE_ARN = "ROLE_ARN", +} + +/** + *
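A paging sketch for ListPermissionsRequest/Response above, assuming the ListPermissionsCommand wrapper; the database name and region are placeholders.

import { LakeFormationClient, ListPermissionsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listDatabasePermissions(databaseName: string): Promise<void> {
  let nextToken: string | undefined;

  do {
    const page = await client.send(
      new ListPermissionsCommand({
        ResourceType: "DATABASE",
        Resource: { Database: { Name: databaseName } },
        MaxResults: 50,
        NextToken: nextToken,
      })
    );

    for (const entry of page.PrincipalResourcePermissions ?? []) {
      console.log(
        entry.Principal?.DataLakePrincipalIdentifier,
        entry.Permissions,
        entry.PermissionsWithGrantOption
      );
    }

    nextToken = page.NextToken;
  } while (nextToken);
}

listDatabasePermissions("sales_db").catch(console.error);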

                                  This structure describes the filtering of columns in a table based on a filter condition.

                                  + */ +export interface FilterCondition { + /** + *

                                  The field to filter in the filter condition.

                                  + */ + Field?: FieldNameString | string; + + /** + *

                                  The comparison operator used in the filter condition.

                                  + */ + ComparisonOperator?: ComparisonOperator | string; + + /** + *

                                  A string with values used in evaluating the filter condition.

                                  + */ + StringValueList?: string[]; +} + +export namespace FilterCondition { + /** + * @internal + */ + export const filterSensitiveLog = (obj: FilterCondition): any => ({ + ...obj, + }); +} + +export interface ListResourcesRequest { + /** + *

                                  Any applicable row-level and/or column-level filtering conditions for the resources.

                                  + */ + FilterConditionList?: FilterCondition[]; + + /** + *

                                  The maximum number of resource results.

                                  + */ + MaxResults?: number; + + /** + *

                                  A continuation token, if this is not the first call to retrieve these resources.

                                  + */ + NextToken?: string; +} + +export namespace ListResourcesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListResourcesRequest): any => ({ + ...obj, + }); +} + +export interface ListResourcesResponse { + /** + *

                                  A summary of the data lake resources.

                                  + */ + ResourceInfoList?: ResourceInfo[]; + + /** + *

                                  A continuation token, if this is not the first call to retrieve these resources.

                                  + */ + NextToken?: string; +} + +export namespace ListResourcesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListResourcesResponse): any => ({ + ...obj, + }); +} + +export enum OptimizerType { + COMPACTION = "COMPACTION", + GARBAGE_COLLECTION = "GARBAGE_COLLECTION", + GENERIC = "ALL", +} + +export interface ListTableStorageOptimizersRequest { + /** + *
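A hedged sketch for ListResources with a FilterCondition, assuming the ListResourcesCommand wrapper; the RESOURCE_ARN/CONTAINS filter is purely illustrative (the supported field/operator combinations are not spelled out in this diff), and the bucket ARN and region are placeholders.

import { LakeFormationClient, ListResourcesCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function findRegisteredBucket(bucketArn: string): Promise<void> {
  const { ResourceInfoList } = await client.send(
    new ListResourcesCommand({
      FilterConditionList: [
        {
          Field: "RESOURCE_ARN",
          ComparisonOperator: "CONTAINS",
          StringValueList: [bucketArn],
        },
      ],
      MaxResults: 10,
    })
  );

  for (const info of ResourceInfoList ?? []) {
    console.log(info.ResourceArn, "registered via", info.RoleArn);
  }
}

findRegisteredBucket("arn:aws:s3:::example-data-lake-bucket").catch(console.error);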

                                  The Catalog ID of the table.

                                  + */ + CatalogId?: string; + + /** + *

                                  Name of the database where the table is present.

                                  + */ + DatabaseName: string | undefined; + + /** + *

                                  Name of the table.

                                  + */ + TableName: string | undefined; + + /** + *

                                  The specific type of storage optimizers to list. The supported value is compaction.

                                  + */ + StorageOptimizerType?: OptimizerType | string; + + /** + *

                                  The number of storage optimizers to return on each call.

                                  + */ + MaxResults?: number; + + /** + *

                                  A continuation token, if this is a continuation call.

                                  + */ + NextToken?: string; +} + +export namespace ListTableStorageOptimizersRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTableStorageOptimizersRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure describing the configuration and details of a storage optimizer.

                                  + */ +export interface StorageOptimizer { + /** + *

                                  The specific type of storage optimizer. The supported value is compaction.

                                  + */ + StorageOptimizerType?: OptimizerType | string; + + /** + *

                                  A map of the storage optimizer configuration. Currently contains only one key-value pair: is_enabled indicates true or false for acceleration.

                                  + */ + Config?: { [key: string]: string }; /** - *

                                  A resource where you will get a list of the principal permissions.

                                  - *

                                  This operation does not support getting privileges on a table with columns. Instead, call this operation on the table, and the operation returns the table and the table with columns.

                                  + *

                                  A message that contains information about any error (if present).

                                  + * + *

                                  When an acceleration result has an enabled status, the error message is empty.

                                  + *

                                  When an acceleration result has a disabled status, the message describes an error or simply indicates "disabled by the user".

                                  */ - Resource?: Resource; + ErrorMessage?: string; /** - *

                                  A continuation token, if this is not the first call to retrieve this list.

                                  + *

                                  A message that contains information about any warnings (if present).

                                  */ - NextToken?: string; + Warnings?: string; /** - *

                                  The maximum number of results to return.

                                  + *

                                  When an acceleration result has an enabled status, contains the details of the last job run.

                                  */ - MaxResults?: number; + LastRunDetails?: string; } -export namespace ListPermissionsRequest { +export namespace StorageOptimizer { /** * @internal */ - export const filterSensitiveLog = (obj: ListPermissionsRequest): any => ({ + export const filterSensitiveLog = (obj: StorageOptimizer): any => ({ ...obj, }); } -export interface ListPermissionsResponse { +export interface ListTableStorageOptimizersResponse { /** - *

                                  A list of principals and their permissions on the resource for the specified principal and resource types.

                                  + *

                                  A list of the storage optimizers associated with a table.

                                  */ - PrincipalResourcePermissions?: PrincipalResourcePermissions[]; + StorageOptimizerList?: StorageOptimizer[]; /** - *

                                  A continuation token, if this is not the first call to retrieve this list.

                                  + *

                                  A continuation token for paginating the returned list of tokens, returned if the current segment of the list is not the last.

                                  */ NextToken?: string; } -export namespace ListPermissionsResponse { +export namespace ListTableStorageOptimizersResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListPermissionsResponse): any => ({ + export const filterSensitiveLog = (obj: ListTableStorageOptimizersResponse): any => ({ ...obj, }); } -export enum ComparisonOperator { - BEGINS_WITH = "BEGINS_WITH", - BETWEEN = "BETWEEN", - CONTAINS = "CONTAINS", - EQ = "EQ", - GE = "GE", - GT = "GT", - IN = "IN", - LE = "LE", - LT = "LT", - NE = "NE", - NOT_CONTAINS = "NOT_CONTAINS", -} - -export enum FieldNameString { - LAST_MODIFIED = "LAST_MODIFIED", - RESOURCE_ARN = "RESOURCE_ARN", - ROLE_ARN = "ROLE_ARN", -} - -/** - *

                                  This structure describes the filtering of columns in a table based on a filter condition.

                                  - */ -export interface FilterCondition { - /** - *

                                  The field to filter in the filter condition.

                                  - */ - Field?: FieldNameString | string; - - /** - *

                                  The comparison operator used in the filter condition.

                                  - */ - ComparisonOperator?: ComparisonOperator | string; - - /** - *

                                  A string with values used in evaluating the filter condition.

                                  - */ - StringValueList?: string[]; +export enum TransactionStatusFilter { + ABORTED = "ABORTED", + ACTIVE = "ACTIVE", + ALL = "ALL", + COMMITTED = "COMMITTED", + COMPLETED = "COMPLETED", } -export namespace FilterCondition { +export interface ListTransactionsRequest { /** - * @internal + *

                                  The catalog for which to list transactions. Defaults to the account ID of the caller.

                                  */ - export const filterSensitiveLog = (obj: FilterCondition): any => ({ - ...obj, - }); -} + CatalogId?: string; -export interface ListResourcesRequest { /** - *

                                  Any applicable row-level and/or column-level filtering conditions for the resources.

                                  + *

                                  A filter indicating the status of transactions to return. Options are ALL | COMPLETED | COMMITTED | ABORTED | ACTIVE. The default is ALL.

                                  */ - FilterConditionList?: FilterCondition[]; + StatusFilter?: TransactionStatusFilter | string; /** - *

                                  The maximum number of resource results.

                                  + *

                                  The maximum number of transactions to return in a single call.

                                  */ MaxResults?: number; /** - *

                                  A continuation token, if this is not the first call to retrieve these resources.

                                  + *

                                  A continuation token if this is not the first call to retrieve transactions.

                                  */ NextToken?: string; } -export namespace ListResourcesRequest { +export namespace ListTransactionsRequest { /** * @internal */ - export const filterSensitiveLog = (obj: ListResourcesRequest): any => ({ + export const filterSensitiveLog = (obj: ListTransactionsRequest): any => ({ ...obj, }); } -export interface ListResourcesResponse { +export interface ListTransactionsResponse { /** - *

                                  A summary of the data lake resources.

                                  + *

                                  A list of transactions. The record for each transaction is a TransactionDescription object.

                                  */ - ResourceInfoList?: ResourceInfo[]; + Transactions?: TransactionDescription[]; /** - *

                                  A continuation token, if this is not the first call to retrieve these resources.

                                  + *

                                  A continuation token indicating whether additional data is available.

                                  */ NextToken?: string; } -export namespace ListResourcesResponse { +export namespace ListTransactionsResponse { /** * @internal */ - export const filterSensitiveLog = (obj: ListResourcesResponse): any => ({ + export const filterSensitiveLog = (obj: ListTransactionsResponse): any => ({ ...obj, }); } export interface PutDataLakeSettingsRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; /** - *

                                  A structure representing a list of AWS Lake Formation principals designated as data lake administrators.

                                  + *

                                  A structure representing a list of Lake Formation principals designated as data lake administrators.

                                  */ DataLakeSettings: DataLakeSettings | undefined; } @@ -1587,7 +2889,7 @@ export interface RegisterResourceRequest { ResourceArn: string | undefined; /** - *

                                  Designates an AWS Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. A service-linked role is a unique type of IAM role that is linked directly to Lake Formation.

                                  + *

                                  Designates an Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. A service-linked role is a unique type of IAM role that is linked directly to Lake Formation.

                                  * *

                                  For more information, see Using Service-Linked Roles for Lake Formation.

                                  */ @@ -1621,17 +2923,17 @@ export namespace RegisterResourceResponse { export interface RemoveLFTagsFromResourceRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; /** - *

                                  The resource where you want to remove a tag.

                                  + *

                                  The database, table, or column resource where you want to remove an LF-tag.

                                  */ Resource: Resource | undefined; /** - *

                                  The tags to be removed from the resource.

                                  + *

                                  The LF-tags to be removed from the resource.

                                  */ LFTags: LFTagPair[] | undefined; } @@ -1663,7 +2965,7 @@ export namespace RemoveLFTagsFromResourceResponse { export interface RevokePermissionsRequest { /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; @@ -1721,7 +3023,7 @@ export interface SearchDatabasesByLFTagsRequest { MaxResults?: number; /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; @@ -1741,16 +3043,16 @@ export namespace SearchDatabasesByLFTagsRequest { } /** - *

                                  A structure describing a database resource with tags.

                                  + *

                                  A structure describing a database resource with LF-tags.

                                  */ export interface TaggedDatabase { /** - *

                                  A database that has tags attached to it.

                                  + *

                                  A database that has LF-tags attached to it.

                                  */ Database?: DatabaseResource; /** - *

                                  A list of tags attached to the database.

                                  + *

                                  A list of LF-tags attached to the database.

                                  */ LFTags?: LFTagPair[]; } @@ -1771,7 +3073,7 @@ export interface SearchDatabasesByLFTagsResponse { NextToken?: string; /** - *

                                  A list of databases that meet the tag conditions.

                                  + *

                                  A list of databases that meet the LF-tag conditions.

                                  */ DatabaseList?: TaggedDatabase[]; } @@ -1797,7 +3099,7 @@ export interface SearchTablesByLFTagsRequest { MaxResults?: number; /** - *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                  + *

                                  The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                  */ CatalogId?: string; @@ -1817,26 +3119,26 @@ export namespace SearchTablesByLFTagsRequest { } /** - *

                                  A structure describing a table resource with tags.

                                  + *

                                  A structure describing a table resource with LF-tags.

                                  */ export interface TaggedTable { /** - *

                                  A table that has tags attached to it.

                                  + *

                                  A table that has LF-tags attached to it.

                                  */ Table?: TableResource; /** - *

                                  A list of tags attached to the database where the table resides.

                                  + *

                                  A list of LF-tags attached to the database where the table resides.

                                  */ LFTagOnDatabase?: LFTagPair[]; /** - *

                                  A list of tags attached to the table.

                                  + *

                                  A list of LF-tags attached to the table.

                                  */ LFTagsOnTable?: LFTagPair[]; /** - *

                                  A list of tags attached to columns in the table.

                                  + *

                                  A list of LF-tags attached to columns in the table.

                                  */ LFTagsOnColumns?: ColumnLFTag[]; } @@ -1857,7 +3159,7 @@ export interface SearchTablesByLFTagsResponse { NextToken?: string; /** - *

                                  A list of tables that meet the tag conditions.

                                  + *

                                  A list of tables that meet the LF-tag conditions.

                                  */ TableList?: TaggedTable[]; } @@ -1871,24 +3173,141 @@ export namespace SearchTablesByLFTagsResponse { }); } +/** + *

+ * A structure containing information about the query plan.
+ */
+export interface QueryPlanningContext {
+  /**
+   * The ID of the Data Catalog where the partition in question resides. If none is provided,
+   * the Amazon Web Services account ID is used by default.
+   */
+  CatalogId?: string;
+
+  /**
+   * The database containing the table.
+   */
+  DatabaseName: string | undefined;
+
+  /**
+   * The time as of when to read the table contents. If not set, the most recent transaction
+   * commit time will be used. Cannot be specified along with TransactionId.
+   */
+  QueryAsOfTime?: Date;
+
+  /**
+   * A map consisting of key-value pairs.
+   */
+  QueryParameters?: { [key: string]: string };
+
+  /**
+   * The transaction ID at which to read the table contents. If this transaction is not committed,
+   * the read will be treated as part of that transaction and will see its writes. If this
+   * transaction has aborted, an error will be returned. If not set, defaults to the most recent
+   * committed transaction. Cannot be specified along with QueryAsOfTime.
+   */
+  TransactionId?: string;
+}
+
+export namespace QueryPlanningContext {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: QueryPlanningContext): any => ({
+    ...obj,
+  });
+}
+
+export interface StartQueryPlanningRequest {
+  /**
+   * A structure containing information about the query plan.
+   */
+  QueryPlanningContext: QueryPlanningContext | undefined;
+
+  /**
+   * A PartiQL query statement used as an input to the planner service.
+   */
+  QueryString: string | undefined;
+}
+
+export namespace StartQueryPlanningRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: StartQueryPlanningRequest): any => ({
+    ...obj,
+    ...(obj.QueryString && { QueryString: SENSITIVE_STRING }),
+  });
+}
+
+/**
+ * A structure for the output.
+ */
+export interface StartQueryPlanningResponse {
+  /**
+   * The ID of the plan query operation. This ID can be used to fetch the actual work unit
+   * descriptors that are produced as the result of the operation, to get the query state,
+   * and as an input to the Execute operation.
+   */
+  QueryId: string | undefined;
+}
+
+export namespace StartQueryPlanningResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: StartQueryPlanningResponse): any => ({
+    ...obj,
+  });
+}
+
+export enum TransactionType {
+  READ_AND_WRITE = "READ_AND_WRITE",
+  READ_ONLY = "READ_ONLY",
+}
+
+export interface StartTransactionRequest {
+  /**
+   * Indicates whether this transaction should be read only or read and write. Writes made using
+   * a read-only transaction ID will be rejected. Read-only transactions do not need to be committed.
+   */
+  TransactionType?: TransactionType | string;
+}
+
+export namespace StartTransactionRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: StartTransactionRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface StartTransactionResponse {
+  /**
+   * An opaque identifier for the transaction.
+   */
+  TransactionId?: string;
+}
+
+export namespace StartTransactionResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: StartTransactionResponse): any => ({
+    ...obj,
+  });
+}
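The shapes above back the new query-planning and transaction operations. A minimal usage sketch, for illustration only: the database, query string, and page size are placeholder values, StartQueryPlanningCommand is assumed to be among the commands added elsewhere in this change, and paginateGetWorkUnits is the paginator introduced later in this patch, assumed to be re-exported from the package root as the generated index usually does.

import {
  LakeFormationClient,
  StartQueryPlanningCommand,
  paginateGetWorkUnits,
} from "@aws-sdk/client-lakeformation";

async function planAndFetchWorkUnits(client: LakeFormationClient): Promise<void> {
  // Plan a PartiQL query against a governed table.
  const { QueryId } = await client.send(
    new StartQueryPlanningCommand({
      QueryPlanningContext: { DatabaseName: "example_db" }, // placeholder database
      QueryString: "SELECT * FROM example_table",           // placeholder query
    })
  );

  // Page through the work unit descriptors produced by the planner.
  for await (const page of paginateGetWorkUnits({ client, pageSize: 25 }, { QueryId })) {
    console.log(page);
  }
}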

 export interface UpdateLFTagRequest {
   /**
-   * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.
+   * The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.
    */
   CatalogId?: string;
 
   /**
-   * The key-name for the tag for which to add or delete values.
+   * The key-name for the LF-tag for which to add or delete values.
    */
   TagKey: string | undefined;
 
   /**
-   * A list of tag values to delete from the tag.
+   * A list of LF-tag values to delete from the LF-tag.
    */
   TagValuesToDelete?: string[];
 
   /**
-   * A list of tag values to add from the tag.
+   * A list of LF-tag values to add to the LF-tag.
    */
   TagValuesToAdd?: string[];
 }
 
@@ -1915,7 +3334,7 @@ export namespace UpdateLFTagResponse {
 
 export interface UpdateResourceRequest {
   /**
-   * The new role to use for the given resource registered in AWS Lake Formation.
+   * The new role to use for the given resource registered in Lake Formation.
    */
   RoleArn: string | undefined;
 
@@ -1944,3 +3363,150 @@ export namespace UpdateResourceResponse {
     ...obj,
   });
 }
+

+/**
+ * An object to delete from the governed table.
+ */
+export interface DeleteObjectInput {
+  /**
+   * The Amazon S3 location of the object to delete.
+   */
+  Uri: string | undefined;
+
+  /**
+   * The Amazon S3 ETag of the object. Returned by GetTableObjects for validation and used to
+   * identify changes to the underlying data.
+   */
+  ETag?: string;
+
+  /**
+   * A list of partition values for the object. A value must be specified for each partition key
+   * associated with the governed table.
+   */
+  PartitionValues?: string[];
+}
+
+export namespace DeleteObjectInput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DeleteObjectInput): any => ({
+    ...obj,
+  });
+}
+
+/**
+ * Defines an object to add to or delete from a governed table.
+ */
+export interface WriteOperation {
+  /**
+   * A new object to add to the governed table.
+   */
+  AddObject?: AddObjectInput;
+
+  /**
+   * An object to delete from the governed table.
+   */
+  DeleteObject?: DeleteObjectInput;
+}
+
+export namespace WriteOperation {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: WriteOperation): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateTableObjectsRequest {
+  /**
+   * The catalog containing the governed table to update. Defaults to the caller’s account ID.
+   */
+  CatalogId?: string;
+
+  /**
+   * The database containing the governed table to update.
+   */
+  DatabaseName: string | undefined;
+
+  /**
+   * The governed table to update.
+   */
+  TableName: string | undefined;
+
+  /**
+   * The transaction at which to do the write.
+   */
+  TransactionId: string | undefined;
+
+  /**
+   * A list of WriteOperation objects that define an object to add to or delete from the manifest
+   * for a governed table.
+   */
+  WriteOperations: WriteOperation[] | undefined;
+}
+
+export namespace UpdateTableObjectsRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateTableObjectsRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateTableObjectsResponse {}
+
+export namespace UpdateTableObjectsResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateTableObjectsResponse): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateTableStorageOptimizerRequest {
+  /**
+   * The Catalog ID of the table.
+   */
+  CatalogId?: string;
+
+  /**
+   * Name of the database where the table is present.
+   */
+  DatabaseName: string | undefined;
+
+  /**
+   * Name of the table for which to enable the storage optimizer.
+   */
+  TableName: string | undefined;
+
+  /**
+   * Name of the configuration for the storage optimizer.
+   */
+  StorageOptimizerConfig: { [key: string]: { [key: string]: string } } | undefined;
+}
+
+export namespace UpdateTableStorageOptimizerRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateTableStorageOptimizerRequest): any => ({
+    ...obj,
+  });
+}
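These shapes support writes to governed table manifests. A minimal usage sketch, for illustration only: the database, table, and S3 URI are placeholder values, and StartTransactionCommand, UpdateTableObjectsCommand, and CommitTransactionCommand are assumed to be among the commands added elsewhere in this change.

import {
  CommitTransactionCommand,
  LakeFormationClient,
  StartTransactionCommand,
  UpdateTableObjectsCommand,
} from "@aws-sdk/client-lakeformation";

async function deleteGovernedObject(client: LakeFormationClient): Promise<void> {
  // Open a read/write transaction for the manifest update.
  const { TransactionId } = await client.send(
    new StartTransactionCommand({ TransactionType: "READ_AND_WRITE" })
  );

  // Remove one object from the governed table's manifest within the transaction.
  await client.send(
    new UpdateTableObjectsCommand({
      DatabaseName: "example_db",          // placeholder names
      TableName: "example_governed_table",
      TransactionId,
      WriteOperations: [{ DeleteObject: { Uri: "s3://example-bucket/part-00000.parquet" } }],
    })
  );

  // Make the manifest change visible to other transactions.
  await client.send(new CommitTransactionCommand({ TransactionId }));
}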

+export interface UpdateTableStorageOptimizerResponse {
+  /**
+   * A response indicating the success or failure of the operation.

                                  + */ + Result?: string; +} + +export namespace UpdateTableStorageOptimizerResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateTableStorageOptimizerResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-lakeformation/src/pagination/GetTableObjectsPaginator.ts b/clients/client-lakeformation/src/pagination/GetTableObjectsPaginator.ts new file mode 100644 index 000000000000..d9b7e24e8636 --- /dev/null +++ b/clients/client-lakeformation/src/pagination/GetTableObjectsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + GetTableObjectsCommand, + GetTableObjectsCommandInput, + GetTableObjectsCommandOutput, +} from "../commands/GetTableObjectsCommand"; +import { LakeFormation } from "../LakeFormation"; +import { LakeFormationClient } from "../LakeFormationClient"; +import { LakeFormationPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: LakeFormationClient, + input: GetTableObjectsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new GetTableObjectsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: LakeFormation, + input: GetTableObjectsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.getTableObjects(input, ...args); +}; +export async function* paginateGetTableObjects( + config: LakeFormationPaginationConfiguration, + input: GetTableObjectsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: GetTableObjectsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof LakeFormation) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof LakeFormationClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected LakeFormation | LakeFormationClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-lakeformation/src/pagination/GetWorkUnitsPaginator.ts b/clients/client-lakeformation/src/pagination/GetWorkUnitsPaginator.ts new file mode 100644 index 000000000000..092e6de23326 --- /dev/null +++ b/clients/client-lakeformation/src/pagination/GetWorkUnitsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + GetWorkUnitsCommand, + GetWorkUnitsCommandInput, + GetWorkUnitsCommandOutput, +} from "../commands/GetWorkUnitsCommand"; +import { LakeFormation } from "../LakeFormation"; +import { LakeFormationClient } from "../LakeFormationClient"; +import { LakeFormationPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: LakeFormationClient, + input: GetWorkUnitsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new GetWorkUnitsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: LakeFormation, + input: GetWorkUnitsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.getWorkUnits(input, ...args); +}; 
+export async function* paginateGetWorkUnits( + config: LakeFormationPaginationConfiguration, + input: GetWorkUnitsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: GetWorkUnitsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["PageSize"] = config.pageSize; + if (config.client instanceof LakeFormation) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof LakeFormationClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected LakeFormation | LakeFormationClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-lakeformation/src/pagination/ListDataCellsFilterPaginator.ts b/clients/client-lakeformation/src/pagination/ListDataCellsFilterPaginator.ts new file mode 100644 index 000000000000..ea503954f3e8 --- /dev/null +++ b/clients/client-lakeformation/src/pagination/ListDataCellsFilterPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListDataCellsFilterCommand, + ListDataCellsFilterCommandInput, + ListDataCellsFilterCommandOutput, +} from "../commands/ListDataCellsFilterCommand"; +import { LakeFormation } from "../LakeFormation"; +import { LakeFormationClient } from "../LakeFormationClient"; +import { LakeFormationPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: LakeFormationClient, + input: ListDataCellsFilterCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListDataCellsFilterCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: LakeFormation, + input: ListDataCellsFilterCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listDataCellsFilter(input, ...args); +}; +export async function* paginateListDataCellsFilter( + config: LakeFormationPaginationConfiguration, + input: ListDataCellsFilterCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListDataCellsFilterCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof LakeFormation) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof LakeFormationClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected LakeFormation | LakeFormationClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-lakeformation/src/pagination/ListLFTagsPaginator.ts b/clients/client-lakeformation/src/pagination/ListLFTagsPaginator.ts new file mode 100644 index 000000000000..f5e91c619243 --- /dev/null +++ b/clients/client-lakeformation/src/pagination/ListLFTagsPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { ListLFTagsCommand, 
ListLFTagsCommandInput, ListLFTagsCommandOutput } from "../commands/ListLFTagsCommand"; +import { LakeFormation } from "../LakeFormation"; +import { LakeFormationClient } from "../LakeFormationClient"; +import { LakeFormationPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: LakeFormationClient, + input: ListLFTagsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListLFTagsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: LakeFormation, + input: ListLFTagsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listLFTags(input, ...args); +}; +export async function* paginateListLFTags( + config: LakeFormationPaginationConfiguration, + input: ListLFTagsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListLFTagsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof LakeFormation) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof LakeFormationClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected LakeFormation | LakeFormationClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-lakeformation/src/pagination/ListTableStorageOptimizersPaginator.ts b/clients/client-lakeformation/src/pagination/ListTableStorageOptimizersPaginator.ts new file mode 100644 index 000000000000..20141043b990 --- /dev/null +++ b/clients/client-lakeformation/src/pagination/ListTableStorageOptimizersPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListTableStorageOptimizersCommand, + ListTableStorageOptimizersCommandInput, + ListTableStorageOptimizersCommandOutput, +} from "../commands/ListTableStorageOptimizersCommand"; +import { LakeFormation } from "../LakeFormation"; +import { LakeFormationClient } from "../LakeFormationClient"; +import { LakeFormationPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: LakeFormationClient, + input: ListTableStorageOptimizersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListTableStorageOptimizersCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: LakeFormation, + input: ListTableStorageOptimizersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listTableStorageOptimizers(input, ...args); +}; +export async function* paginateListTableStorageOptimizers( + config: LakeFormationPaginationConfiguration, + input: ListTableStorageOptimizersCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListTableStorageOptimizersCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof 
LakeFormation) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof LakeFormationClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected LakeFormation | LakeFormationClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-lakeformation/src/pagination/ListTransactionsPaginator.ts b/clients/client-lakeformation/src/pagination/ListTransactionsPaginator.ts new file mode 100644 index 000000000000..4d82f1b69721 --- /dev/null +++ b/clients/client-lakeformation/src/pagination/ListTransactionsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListTransactionsCommand, + ListTransactionsCommandInput, + ListTransactionsCommandOutput, +} from "../commands/ListTransactionsCommand"; +import { LakeFormation } from "../LakeFormation"; +import { LakeFormationClient } from "../LakeFormationClient"; +import { LakeFormationPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: LakeFormationClient, + input: ListTransactionsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListTransactionsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: LakeFormation, + input: ListTransactionsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listTransactions(input, ...args); +}; +export async function* paginateListTransactions( + config: LakeFormationPaginationConfiguration, + input: ListTransactionsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListTransactionsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof LakeFormation) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof LakeFormationClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected LakeFormation | LakeFormationClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-lakeformation/src/pagination/SearchDatabasesByLFTagsPaginator.ts b/clients/client-lakeformation/src/pagination/SearchDatabasesByLFTagsPaginator.ts new file mode 100644 index 000000000000..1058c74956eb --- /dev/null +++ b/clients/client-lakeformation/src/pagination/SearchDatabasesByLFTagsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + SearchDatabasesByLFTagsCommand, + SearchDatabasesByLFTagsCommandInput, + SearchDatabasesByLFTagsCommandOutput, +} from "../commands/SearchDatabasesByLFTagsCommand"; +import { LakeFormation } from "../LakeFormation"; +import { LakeFormationClient } from "../LakeFormationClient"; +import { LakeFormationPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: LakeFormationClient, + input: SearchDatabasesByLFTagsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + 
return await client.send(new SearchDatabasesByLFTagsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: LakeFormation, + input: SearchDatabasesByLFTagsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.searchDatabasesByLFTags(input, ...args); +}; +export async function* paginateSearchDatabasesByLFTags( + config: LakeFormationPaginationConfiguration, + input: SearchDatabasesByLFTagsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: SearchDatabasesByLFTagsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof LakeFormation) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof LakeFormationClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected LakeFormation | LakeFormationClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-lakeformation/src/pagination/SearchTablesByLFTagsPaginator.ts b/clients/client-lakeformation/src/pagination/SearchTablesByLFTagsPaginator.ts new file mode 100644 index 000000000000..fa559feb08b8 --- /dev/null +++ b/clients/client-lakeformation/src/pagination/SearchTablesByLFTagsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + SearchTablesByLFTagsCommand, + SearchTablesByLFTagsCommandInput, + SearchTablesByLFTagsCommandOutput, +} from "../commands/SearchTablesByLFTagsCommand"; +import { LakeFormation } from "../LakeFormation"; +import { LakeFormationClient } from "../LakeFormationClient"; +import { LakeFormationPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: LakeFormationClient, + input: SearchTablesByLFTagsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new SearchTablesByLFTagsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: LakeFormation, + input: SearchTablesByLFTagsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.searchTablesByLFTags(input, ...args); +}; +export async function* paginateSearchTablesByLFTags( + config: LakeFormationPaginationConfiguration, + input: SearchTablesByLFTagsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: SearchTablesByLFTagsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof LakeFormation) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof LakeFormationClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected LakeFormation | LakeFormationClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git 
a/clients/client-lakeformation/src/pagination/index.ts b/clients/client-lakeformation/src/pagination/index.ts index ba54a1bddb5f..f48b6a4e3e21 100644 --- a/clients/client-lakeformation/src/pagination/index.ts +++ b/clients/client-lakeformation/src/pagination/index.ts @@ -1,4 +1,12 @@ export * from "./GetEffectivePermissionsForPathPaginator"; +export * from "./GetTableObjectsPaginator"; +export * from "./GetWorkUnitsPaginator"; export * from "./Interfaces"; +export * from "./ListDataCellsFilterPaginator"; +export * from "./ListLFTagsPaginator"; export * from "./ListPermissionsPaginator"; export * from "./ListResourcesPaginator"; +export * from "./ListTableStorageOptimizersPaginator"; +export * from "./ListTransactionsPaginator"; +export * from "./SearchDatabasesByLFTagsPaginator"; +export * from "./SearchTablesByLFTagsPaginator"; diff --git a/clients/client-lakeformation/src/protocols/Aws_json1_1.ts b/clients/client-lakeformation/src/protocols/Aws_json1_1.ts deleted file mode 100644 index ce404de67c8b..000000000000 --- a/clients/client-lakeformation/src/protocols/Aws_json1_1.ts +++ /dev/null @@ -1,3906 +0,0 @@ -import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; -import { - expectNonNull as __expectNonNull, - expectNumber as __expectNumber, - expectString as __expectString, - parseEpochTimestamp as __parseEpochTimestamp, -} from "@aws-sdk/smithy-client"; -import { - Endpoint as __Endpoint, - HeaderBag as __HeaderBag, - MetadataBearer as __MetadataBearer, - ResponseMetadata as __ResponseMetadata, - SerdeContext as __SerdeContext, - SmithyException as __SmithyException, -} from "@aws-sdk/types"; - -import { - AddLFTagsToResourceCommandInput, - AddLFTagsToResourceCommandOutput, -} from "../commands/AddLFTagsToResourceCommand"; -import { - BatchGrantPermissionsCommandInput, - BatchGrantPermissionsCommandOutput, -} from "../commands/BatchGrantPermissionsCommand"; -import { - BatchRevokePermissionsCommandInput, - BatchRevokePermissionsCommandOutput, -} from "../commands/BatchRevokePermissionsCommand"; -import { CreateLFTagCommandInput, CreateLFTagCommandOutput } from "../commands/CreateLFTagCommand"; -import { DeleteLFTagCommandInput, DeleteLFTagCommandOutput } from "../commands/DeleteLFTagCommand"; -import { DeregisterResourceCommandInput, DeregisterResourceCommandOutput } from "../commands/DeregisterResourceCommand"; -import { DescribeResourceCommandInput, DescribeResourceCommandOutput } from "../commands/DescribeResourceCommand"; -import { - GetDataLakeSettingsCommandInput, - GetDataLakeSettingsCommandOutput, -} from "../commands/GetDataLakeSettingsCommand"; -import { - GetEffectivePermissionsForPathCommandInput, - GetEffectivePermissionsForPathCommandOutput, -} from "../commands/GetEffectivePermissionsForPathCommand"; -import { GetLFTagCommandInput, GetLFTagCommandOutput } from "../commands/GetLFTagCommand"; -import { GetResourceLFTagsCommandInput, GetResourceLFTagsCommandOutput } from "../commands/GetResourceLFTagsCommand"; -import { GrantPermissionsCommandInput, GrantPermissionsCommandOutput } from "../commands/GrantPermissionsCommand"; -import { ListLFTagsCommandInput, ListLFTagsCommandOutput } from "../commands/ListLFTagsCommand"; -import { ListPermissionsCommandInput, ListPermissionsCommandOutput } from "../commands/ListPermissionsCommand"; -import { ListResourcesCommandInput, ListResourcesCommandOutput } from "../commands/ListResourcesCommand"; -import { - PutDataLakeSettingsCommandInput, - PutDataLakeSettingsCommandOutput, -} from 
"../commands/PutDataLakeSettingsCommand"; -import { RegisterResourceCommandInput, RegisterResourceCommandOutput } from "../commands/RegisterResourceCommand"; -import { - RemoveLFTagsFromResourceCommandInput, - RemoveLFTagsFromResourceCommandOutput, -} from "../commands/RemoveLFTagsFromResourceCommand"; -import { RevokePermissionsCommandInput, RevokePermissionsCommandOutput } from "../commands/RevokePermissionsCommand"; -import { - SearchDatabasesByLFTagsCommandInput, - SearchDatabasesByLFTagsCommandOutput, -} from "../commands/SearchDatabasesByLFTagsCommand"; -import { - SearchTablesByLFTagsCommandInput, - SearchTablesByLFTagsCommandOutput, -} from "../commands/SearchTablesByLFTagsCommand"; -import { UpdateLFTagCommandInput, UpdateLFTagCommandOutput } from "../commands/UpdateLFTagCommand"; -import { UpdateResourceCommandInput, UpdateResourceCommandOutput } from "../commands/UpdateResourceCommand"; -import { - AccessDeniedException, - AddLFTagsToResourceRequest, - AddLFTagsToResourceResponse, - AlreadyExistsException, - BatchGrantPermissionsRequest, - BatchGrantPermissionsResponse, - BatchPermissionsFailureEntry, - BatchPermissionsRequestEntry, - BatchRevokePermissionsRequest, - BatchRevokePermissionsResponse, - CatalogResource, - ColumnLFTag, - ColumnWildcard, - ConcurrentModificationException, - CreateLFTagRequest, - CreateLFTagResponse, - DatabaseResource, - DataLakePrincipal, - DataLakeSettings, - DataLocationResource, - DeleteLFTagRequest, - DeleteLFTagResponse, - DeregisterResourceRequest, - DeregisterResourceResponse, - DescribeResourceRequest, - DescribeResourceResponse, - DetailsMap, - EntityNotFoundException, - ErrorDetail, - FilterCondition, - GetDataLakeSettingsRequest, - GetDataLakeSettingsResponse, - GetEffectivePermissionsForPathRequest, - GetEffectivePermissionsForPathResponse, - GetLFTagRequest, - GetLFTagResponse, - GetResourceLFTagsRequest, - GetResourceLFTagsResponse, - GlueEncryptionException, - GrantPermissionsRequest, - GrantPermissionsResponse, - InternalServiceException, - InvalidInputException, - LFTag, - LFTagError, - LFTagKeyResource, - LFTagPair, - LFTagPolicyResource, - ListLFTagsRequest, - ListLFTagsResponse, - ListPermissionsRequest, - ListPermissionsResponse, - ListResourcesRequest, - ListResourcesResponse, - OperationTimeoutException, - Permission, - PrincipalPermissions, - PrincipalResourcePermissions, - PutDataLakeSettingsRequest, - PutDataLakeSettingsResponse, - RegisterResourceRequest, - RegisterResourceResponse, - RemoveLFTagsFromResourceRequest, - RemoveLFTagsFromResourceResponse, - Resource, - ResourceInfo, - ResourceNumberLimitExceededException, - RevokePermissionsRequest, - RevokePermissionsResponse, - SearchDatabasesByLFTagsRequest, - SearchDatabasesByLFTagsResponse, - SearchTablesByLFTagsRequest, - SearchTablesByLFTagsResponse, - TableResource, - TableWildcard, - TableWithColumnsResource, - TaggedDatabase, - TaggedTable, - UpdateLFTagRequest, - UpdateLFTagResponse, - UpdateResourceRequest, - UpdateResourceResponse, -} from "../models/models_0"; - -export const serializeAws_json1_1AddLFTagsToResourceCommand = async ( - input: AddLFTagsToResourceCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.AddLFTagsToResource", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1AddLFTagsToResourceRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export 
const serializeAws_json1_1BatchGrantPermissionsCommand = async ( - input: BatchGrantPermissionsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.BatchGrantPermissions", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1BatchGrantPermissionsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1BatchRevokePermissionsCommand = async ( - input: BatchRevokePermissionsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.BatchRevokePermissions", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1BatchRevokePermissionsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1CreateLFTagCommand = async ( - input: CreateLFTagCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.CreateLFTag", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1CreateLFTagRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1DeleteLFTagCommand = async ( - input: DeleteLFTagCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.DeleteLFTag", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1DeleteLFTagRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1DeregisterResourceCommand = async ( - input: DeregisterResourceCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.DeregisterResource", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1DeregisterResourceRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1DescribeResourceCommand = async ( - input: DescribeResourceCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.DescribeResource", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1DescribeResourceRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1GetDataLakeSettingsCommand = async ( - input: GetDataLakeSettingsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.GetDataLakeSettings", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1GetDataLakeSettingsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1GetEffectivePermissionsForPathCommand = async ( - input: GetEffectivePermissionsForPathCommandInput, - 
context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.GetEffectivePermissionsForPath", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1GetEffectivePermissionsForPathRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1GetLFTagCommand = async ( - input: GetLFTagCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.GetLFTag", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1GetLFTagRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1GetResourceLFTagsCommand = async ( - input: GetResourceLFTagsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.GetResourceLFTags", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1GetResourceLFTagsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1GrantPermissionsCommand = async ( - input: GrantPermissionsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.GrantPermissions", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1GrantPermissionsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1ListLFTagsCommand = async ( - input: ListLFTagsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.ListLFTags", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1ListLFTagsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1ListPermissionsCommand = async ( - input: ListPermissionsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.ListPermissions", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1ListPermissionsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1ListResourcesCommand = async ( - input: ListResourcesCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.ListResources", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1ListResourcesRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1PutDataLakeSettingsCommand = async ( - input: PutDataLakeSettingsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": 
"AWSLakeFormation.PutDataLakeSettings", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1PutDataLakeSettingsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1RegisterResourceCommand = async ( - input: RegisterResourceCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.RegisterResource", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1RegisterResourceRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1RemoveLFTagsFromResourceCommand = async ( - input: RemoveLFTagsFromResourceCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.RemoveLFTagsFromResource", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1RemoveLFTagsFromResourceRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1RevokePermissionsCommand = async ( - input: RevokePermissionsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.RevokePermissions", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1RevokePermissionsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1SearchDatabasesByLFTagsCommand = async ( - input: SearchDatabasesByLFTagsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.SearchDatabasesByLFTags", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1SearchDatabasesByLFTagsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1SearchTablesByLFTagsCommand = async ( - input: SearchTablesByLFTagsCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.SearchTablesByLFTags", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1SearchTablesByLFTagsRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1UpdateLFTagCommand = async ( - input: UpdateLFTagCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.UpdateLFTag", - }; - let body: any; - body = JSON.stringify(serializeAws_json1_1UpdateLFTagRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const serializeAws_json1_1UpdateResourceCommand = async ( - input: UpdateResourceCommandInput, - context: __SerdeContext -): Promise<__HttpRequest> => { - const headers: __HeaderBag = { - "content-type": "application/x-amz-json-1.1", - "x-amz-target": "AWSLakeFormation.UpdateResource", - }; - let body: any; - body = 
JSON.stringify(serializeAws_json1_1UpdateResourceRequest(input, context)); - return buildHttpRpcRequest(context, headers, "/", undefined, body); -}; - -export const deserializeAws_json1_1AddLFTagsToResourceCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1AddLFTagsToResourceCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1AddLFTagsToResourceResponse(data, context); - const response: AddLFTagsToResourceCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1AddLFTagsToResourceCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ConcurrentModificationException": - case "com.amazonaws.lakeformation#ConcurrentModificationException": - response = { - ...(await deserializeAws_json1_1ConcurrentModificationExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1BatchGrantPermissionsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - 
return deserializeAws_json1_1BatchGrantPermissionsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1BatchGrantPermissionsResponse(data, context); - const response: BatchGrantPermissionsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1BatchGrantPermissionsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1BatchRevokePermissionsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1BatchRevokePermissionsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1BatchRevokePermissionsResponse(data, context); - const response: BatchRevokePermissionsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1BatchRevokePermissionsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = 
parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1CreateLFTagCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1CreateLFTagCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1CreateLFTagResponse(data, context); - const response: CreateLFTagCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1CreateLFTagCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ResourceNumberLimitExceededException": - case "com.amazonaws.lakeformation#ResourceNumberLimitExceededException": - response = { - ...(await deserializeAws_json1_1ResourceNumberLimitExceededExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - 
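// Editorial sketch (not part of the original patch): the removed error deserializers above all
// follow one pattern -- resolve an error code from the parsed JSON body, match it against the
// operation's modeled Lake Formation exceptions, and reject with an Error whose name is the code
// and whose message is normalized from `message`/`Message`. The stand-alone snippet below is a
// simplified illustration of that mapping; `resolveLakeFormationError` and the sample body are
// hypothetical helpers, not part of the generated client.
interface ParsedErrorBody {
  __type?: string;
  code?: string;
  message?: string;
  Message?: string;
}

function resolveLakeFormationError(body: ParsedErrorBody): Error {
  // The generated code derives the code from the body (or a response header) and trims the
  // "namespace#" prefix, e.g. "com.amazonaws.lakeformation#EntityNotFoundException".
  const rawCode = body.code ?? body.__type ?? "UnknownError";
  const errorCode = rawCode.includes("#") ? rawCode.split("#").pop()! : rawCode;
  const message = body.message ?? body.Message ?? errorCode;
  const error = new Error(message);
  error.name = errorCode; // e.g. "EntityNotFoundException", "InvalidInputException", ...
  return error;
}

// Example: a modeled CreateLFTag failure body maps to a named error.
const mapped = resolveLakeFormationError({
  __type: "com.amazonaws.lakeformation#ResourceNumberLimitExceededException",
  Message: "LF-tag quota reached",
});
console.log(mapped.name, "-", mapped.message);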
response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1DeleteLFTagCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1DeleteLFTagCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1DeleteLFTagResponse(data, context); - const response: DeleteLFTagCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1DeleteLFTagCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1DeregisterResourceCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1DeregisterResourceCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1DeregisterResourceResponse(data, context); - const response: DeregisterResourceCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return 
Promise.resolve(response); -}; - -const deserializeAws_json1_1DeregisterResourceCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1DescribeResourceCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1DescribeResourceCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1DescribeResourceResponse(data, context); - const response: DescribeResourceCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1DescribeResourceCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await 
deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1GetDataLakeSettingsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1GetDataLakeSettingsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1GetDataLakeSettingsResponse(data, context); - const response: GetDataLakeSettingsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1GetDataLakeSettingsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new 
Error(message), response)); -}; - -export const deserializeAws_json1_1GetEffectivePermissionsForPathCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1GetEffectivePermissionsForPathCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1GetEffectivePermissionsForPathResponse(data, context); - const response: GetEffectivePermissionsForPathCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1GetEffectivePermissionsForPathCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1GetLFTagCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1GetLFTagCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1GetLFTagResponse(data, context); - const response: GetLFTagCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1GetLFTagCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & 
{ [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1GetResourceLFTagsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1GetResourceLFTagsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1GetResourceLFTagsResponse(data, context); - const response: GetResourceLFTagsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1GetResourceLFTagsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - 
$metadata: deserializeMetadata(output), - }; - break; - case "GlueEncryptionException": - case "com.amazonaws.lakeformation#GlueEncryptionException": - response = { - ...(await deserializeAws_json1_1GlueEncryptionExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1GrantPermissionsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1GrantPermissionsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1GrantPermissionsResponse(data, context); - const response: GrantPermissionsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1GrantPermissionsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "ConcurrentModificationException": - case "com.amazonaws.lakeformation#ConcurrentModificationException": - response = { - ...(await deserializeAws_json1_1ConcurrentModificationExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const 
parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1ListLFTagsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1ListLFTagsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1ListLFTagsResponse(data, context); - const response: ListLFTagsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1ListLFTagsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1ListPermissionsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1ListPermissionsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1ListPermissionsResponse(data, context); - const response: 
ListPermissionsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1ListPermissionsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1ListResourcesCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1ListResourcesCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1ListResourcesResponse(data, context); - const response: ListResourcesCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1ListResourcesCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case 
"com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1PutDataLakeSettingsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1PutDataLakeSettingsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1PutDataLakeSettingsResponse(data, context); - const response: PutDataLakeSettingsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1PutDataLakeSettingsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1RegisterResourceCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1RegisterResourceCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1RegisterResourceResponse(data, context); - const response: RegisterResourceCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1RegisterResourceCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - 
const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AlreadyExistsException": - case "com.amazonaws.lakeformation#AlreadyExistsException": - response = { - ...(await deserializeAws_json1_1AlreadyExistsExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1RemoveLFTagsFromResourceCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1RemoveLFTagsFromResourceCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1RemoveLFTagsFromResourceResponse(data, context); - const response: RemoveLFTagsFromResourceCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1RemoveLFTagsFromResourceCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ConcurrentModificationException": - case "com.amazonaws.lakeformation#ConcurrentModificationException": - response = { - ...(await deserializeAws_json1_1ConcurrentModificationExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case 
"EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "GlueEncryptionException": - case "com.amazonaws.lakeformation#GlueEncryptionException": - response = { - ...(await deserializeAws_json1_1GlueEncryptionExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1RevokePermissionsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1RevokePermissionsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1RevokePermissionsResponse(data, context); - const response: RevokePermissionsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1RevokePermissionsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "ConcurrentModificationException": - case "com.amazonaws.lakeformation#ConcurrentModificationException": - response = { - ...(await deserializeAws_json1_1ConcurrentModificationExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case 
"com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1SearchDatabasesByLFTagsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1SearchDatabasesByLFTagsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1SearchDatabasesByLFTagsResponse(data, context); - const response: SearchDatabasesByLFTagsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1SearchDatabasesByLFTagsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "GlueEncryptionException": - case "com.amazonaws.lakeformation#GlueEncryptionException": - response = { - ...(await deserializeAws_json1_1GlueEncryptionExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || 
errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1SearchTablesByLFTagsCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1SearchTablesByLFTagsCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1SearchTablesByLFTagsResponse(data, context); - const response: SearchTablesByLFTagsCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1SearchTablesByLFTagsCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "GlueEncryptionException": - case "com.amazonaws.lakeformation#GlueEncryptionException": - response = { - ...(await deserializeAws_json1_1GlueEncryptionExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return 
Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1UpdateLFTagCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1UpdateLFTagCommandError(output, context); - } - const data: any = await parseBody(output.body, context); - let contents: any = {}; - contents = deserializeAws_json1_1UpdateLFTagResponse(data, context); - const response: UpdateLFTagCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1UpdateLFTagCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "AccessDeniedException": - case "com.amazonaws.lakeformation#AccessDeniedException": - response = { - ...(await deserializeAws_json1_1AccessDeniedExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "ConcurrentModificationException": - case "com.amazonaws.lakeformation#ConcurrentModificationException": - response = { - ...(await deserializeAws_json1_1ConcurrentModificationExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -export const deserializeAws_json1_1UpdateResourceCommand = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - if (output.statusCode >= 300) { - return deserializeAws_json1_1UpdateResourceCommandError(output, context); - } - const data: any = await parseBody(output.body, context); 
- let contents: any = {}; - contents = deserializeAws_json1_1UpdateResourceResponse(data, context); - const response: UpdateResourceCommandOutput = { - $metadata: deserializeMetadata(output), - ...contents, - }; - return Promise.resolve(response); -}; - -const deserializeAws_json1_1UpdateResourceCommandError = async ( - output: __HttpResponse, - context: __SerdeContext -): Promise => { - const parsedOutput: any = { - ...output, - body: await parseBody(output.body, context), - }; - let response: __SmithyException & __MetadataBearer & { [key: string]: any }; - let errorCode = "UnknownError"; - errorCode = loadRestJsonErrorCode(output, parsedOutput.body); - switch (errorCode) { - case "EntityNotFoundException": - case "com.amazonaws.lakeformation#EntityNotFoundException": - response = { - ...(await deserializeAws_json1_1EntityNotFoundExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InternalServiceException": - case "com.amazonaws.lakeformation#InternalServiceException": - response = { - ...(await deserializeAws_json1_1InternalServiceExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "InvalidInputException": - case "com.amazonaws.lakeformation#InvalidInputException": - response = { - ...(await deserializeAws_json1_1InvalidInputExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - case "OperationTimeoutException": - case "com.amazonaws.lakeformation#OperationTimeoutException": - response = { - ...(await deserializeAws_json1_1OperationTimeoutExceptionResponse(parsedOutput, context)), - name: errorCode, - $metadata: deserializeMetadata(output), - }; - break; - default: - const parsedBody = parsedOutput.body; - errorCode = parsedBody.code || parsedBody.Code || errorCode; - response = { - ...parsedBody, - name: `${errorCode}`, - message: parsedBody.message || parsedBody.Message || errorCode, - $fault: "client", - $metadata: deserializeMetadata(output), - } as any; - } - const message = response.message || response.Message || errorCode; - response.message = message; - delete response.Message; - return Promise.reject(Object.assign(new Error(message), response)); -}; - -const deserializeAws_json1_1AccessDeniedExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1AccessDeniedException(body, context); - const contents: AccessDeniedException = { - name: "AccessDeniedException", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const deserializeAws_json1_1AlreadyExistsExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1AlreadyExistsException(body, context); - const contents: AlreadyExistsException = { - name: "AlreadyExistsException", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const deserializeAws_json1_1ConcurrentModificationExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1ConcurrentModificationException(body, context); - const contents: ConcurrentModificationException = { - name: 
"ConcurrentModificationException", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const deserializeAws_json1_1EntityNotFoundExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1EntityNotFoundException(body, context); - const contents: EntityNotFoundException = { - name: "EntityNotFoundException", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const deserializeAws_json1_1GlueEncryptionExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1GlueEncryptionException(body, context); - const contents: GlueEncryptionException = { - name: "GlueEncryptionException", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const deserializeAws_json1_1InternalServiceExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1InternalServiceException(body, context); - const contents: InternalServiceException = { - name: "InternalServiceException", - $fault: "server", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const deserializeAws_json1_1InvalidInputExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1InvalidInputException(body, context); - const contents: InvalidInputException = { - name: "InvalidInputException", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const deserializeAws_json1_1OperationTimeoutExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1OperationTimeoutException(body, context); - const contents: OperationTimeoutException = { - name: "OperationTimeoutException", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const deserializeAws_json1_1ResourceNumberLimitExceededExceptionResponse = async ( - parsedOutput: any, - context: __SerdeContext -): Promise => { - const body = parsedOutput.body; - const deserialized: any = deserializeAws_json1_1ResourceNumberLimitExceededException(body, context); - const contents: ResourceNumberLimitExceededException = { - name: "ResourceNumberLimitExceededException", - $fault: "client", - $metadata: deserializeMetadata(parsedOutput), - ...deserialized, - }; - return contents; -}; - -const serializeAws_json1_1AddLFTagsToResourceRequest = ( - input: AddLFTagsToResourceRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.LFTags !== undefined && - input.LFTags !== null && { LFTags: serializeAws_json1_1LFTagsList(input.LFTags, context) }), - ...(input.Resource !== undefined && - input.Resource !== null && { Resource: serializeAws_json1_1Resource(input.Resource, context) }), - }; -}; - -const serializeAws_json1_1BatchGrantPermissionsRequest = ( - input: 
BatchGrantPermissionsRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Entries !== undefined && - input.Entries !== null && { - Entries: serializeAws_json1_1BatchPermissionsRequestEntryList(input.Entries, context), - }), - }; -}; - -const serializeAws_json1_1BatchPermissionsRequestEntry = ( - input: BatchPermissionsRequestEntry, - context: __SerdeContext -): any => { - return { - ...(input.Id !== undefined && input.Id !== null && { Id: input.Id }), - ...(input.Permissions !== undefined && - input.Permissions !== null && { Permissions: serializeAws_json1_1PermissionList(input.Permissions, context) }), - ...(input.PermissionsWithGrantOption !== undefined && - input.PermissionsWithGrantOption !== null && { - PermissionsWithGrantOption: serializeAws_json1_1PermissionList(input.PermissionsWithGrantOption, context), - }), - ...(input.Principal !== undefined && - input.Principal !== null && { Principal: serializeAws_json1_1DataLakePrincipal(input.Principal, context) }), - ...(input.Resource !== undefined && - input.Resource !== null && { Resource: serializeAws_json1_1Resource(input.Resource, context) }), - }; -}; - -const serializeAws_json1_1BatchPermissionsRequestEntryList = ( - input: BatchPermissionsRequestEntry[], - context: __SerdeContext -): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return serializeAws_json1_1BatchPermissionsRequestEntry(entry, context); - }); -}; - -const serializeAws_json1_1BatchRevokePermissionsRequest = ( - input: BatchRevokePermissionsRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Entries !== undefined && - input.Entries !== null && { - Entries: serializeAws_json1_1BatchPermissionsRequestEntryList(input.Entries, context), - }), - }; -}; - -const serializeAws_json1_1CatalogResource = (input: CatalogResource, context: __SerdeContext): any => { - return {}; -}; - -const serializeAws_json1_1ColumnNames = (input: string[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return entry; - }); -}; - -const serializeAws_json1_1ColumnWildcard = (input: ColumnWildcard, context: __SerdeContext): any => { - return { - ...(input.ExcludedColumnNames !== undefined && - input.ExcludedColumnNames !== null && { - ExcludedColumnNames: serializeAws_json1_1ColumnNames(input.ExcludedColumnNames, context), - }), - }; -}; - -const serializeAws_json1_1CreateLFTagRequest = (input: CreateLFTagRequest, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), - ...(input.TagValues !== undefined && - input.TagValues !== null && { TagValues: serializeAws_json1_1TagValueList(input.TagValues, context) }), - }; -}; - -const serializeAws_json1_1DatabaseResource = (input: DatabaseResource, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), - }; -}; - -const serializeAws_json1_1DataLakePrincipal = (input: DataLakePrincipal, 
context: __SerdeContext): any => { - return { - ...(input.DataLakePrincipalIdentifier !== undefined && - input.DataLakePrincipalIdentifier !== null && { DataLakePrincipalIdentifier: input.DataLakePrincipalIdentifier }), - }; -}; - -const serializeAws_json1_1DataLakePrincipalList = (input: DataLakePrincipal[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return serializeAws_json1_1DataLakePrincipal(entry, context); - }); -}; - -const serializeAws_json1_1DataLakeSettings = (input: DataLakeSettings, context: __SerdeContext): any => { - return { - ...(input.CreateDatabaseDefaultPermissions !== undefined && - input.CreateDatabaseDefaultPermissions !== null && { - CreateDatabaseDefaultPermissions: serializeAws_json1_1PrincipalPermissionsList( - input.CreateDatabaseDefaultPermissions, - context - ), - }), - ...(input.CreateTableDefaultPermissions !== undefined && - input.CreateTableDefaultPermissions !== null && { - CreateTableDefaultPermissions: serializeAws_json1_1PrincipalPermissionsList( - input.CreateTableDefaultPermissions, - context - ), - }), - ...(input.DataLakeAdmins !== undefined && - input.DataLakeAdmins !== null && { - DataLakeAdmins: serializeAws_json1_1DataLakePrincipalList(input.DataLakeAdmins, context), - }), - ...(input.TrustedResourceOwners !== undefined && - input.TrustedResourceOwners !== null && { - TrustedResourceOwners: serializeAws_json1_1TrustedResourceOwners(input.TrustedResourceOwners, context), - }), - }; -}; - -const serializeAws_json1_1DataLocationResource = (input: DataLocationResource, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), - }; -}; - -const serializeAws_json1_1DeleteLFTagRequest = (input: DeleteLFTagRequest, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), - }; -}; - -const serializeAws_json1_1DeregisterResourceRequest = ( - input: DeregisterResourceRequest, - context: __SerdeContext -): any => { - return { - ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), - }; -}; - -const serializeAws_json1_1DescribeResourceRequest = (input: DescribeResourceRequest, context: __SerdeContext): any => { - return { - ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), - }; -}; - -const serializeAws_json1_1Expression = (input: LFTag[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return serializeAws_json1_1LFTag(entry, context); - }); -}; - -const serializeAws_json1_1FilterCondition = (input: FilterCondition, context: __SerdeContext): any => { - return { - ...(input.ComparisonOperator !== undefined && - input.ComparisonOperator !== null && { ComparisonOperator: input.ComparisonOperator }), - ...(input.Field !== undefined && input.Field !== null && { Field: input.Field }), - ...(input.StringValueList !== undefined && - input.StringValueList !== null && { - StringValueList: serializeAws_json1_1StringValueList(input.StringValueList, context), - }), - }; -}; - -const 
serializeAws_json1_1FilterConditionList = (input: FilterCondition[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return serializeAws_json1_1FilterCondition(entry, context); - }); -}; - -const serializeAws_json1_1GetDataLakeSettingsRequest = ( - input: GetDataLakeSettingsRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - }; -}; - -const serializeAws_json1_1GetEffectivePermissionsForPathRequest = ( - input: GetEffectivePermissionsForPathRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), - }; -}; - -const serializeAws_json1_1GetLFTagRequest = (input: GetLFTagRequest, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), - }; -}; - -const serializeAws_json1_1GetResourceLFTagsRequest = ( - input: GetResourceLFTagsRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Resource !== undefined && - input.Resource !== null && { Resource: serializeAws_json1_1Resource(input.Resource, context) }), - ...(input.ShowAssignedLFTags !== undefined && - input.ShowAssignedLFTags !== null && { ShowAssignedLFTags: input.ShowAssignedLFTags }), - }; -}; - -const serializeAws_json1_1GrantPermissionsRequest = (input: GrantPermissionsRequest, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Permissions !== undefined && - input.Permissions !== null && { Permissions: serializeAws_json1_1PermissionList(input.Permissions, context) }), - ...(input.PermissionsWithGrantOption !== undefined && - input.PermissionsWithGrantOption !== null && { - PermissionsWithGrantOption: serializeAws_json1_1PermissionList(input.PermissionsWithGrantOption, context), - }), - ...(input.Principal !== undefined && - input.Principal !== null && { Principal: serializeAws_json1_1DataLakePrincipal(input.Principal, context) }), - ...(input.Resource !== undefined && - input.Resource !== null && { Resource: serializeAws_json1_1Resource(input.Resource, context) }), - }; -}; - -const serializeAws_json1_1LFTag = (input: LFTag, context: __SerdeContext): any => { - return { - ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), - ...(input.TagValues !== undefined && - input.TagValues !== null && { TagValues: serializeAws_json1_1TagValueList(input.TagValues, context) }), - }; -}; - -const serializeAws_json1_1LFTagKeyResource = (input: LFTagKeyResource, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), - 
...(input.TagValues !== undefined && - input.TagValues !== null && { TagValues: serializeAws_json1_1TagValueList(input.TagValues, context) }), - }; -}; - -const serializeAws_json1_1LFTagPair = (input: LFTagPair, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), - ...(input.TagValues !== undefined && - input.TagValues !== null && { TagValues: serializeAws_json1_1TagValueList(input.TagValues, context) }), - }; -}; - -const serializeAws_json1_1LFTagPolicyResource = (input: LFTagPolicyResource, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Expression !== undefined && - input.Expression !== null && { Expression: serializeAws_json1_1Expression(input.Expression, context) }), - ...(input.ResourceType !== undefined && input.ResourceType !== null && { ResourceType: input.ResourceType }), - }; -}; - -const serializeAws_json1_1LFTagsList = (input: LFTagPair[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return serializeAws_json1_1LFTagPair(entry, context); - }); -}; - -const serializeAws_json1_1ListLFTagsRequest = (input: ListLFTagsRequest, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - ...(input.ResourceShareType !== undefined && - input.ResourceShareType !== null && { ResourceShareType: input.ResourceShareType }), - }; -}; - -const serializeAws_json1_1ListPermissionsRequest = (input: ListPermissionsRequest, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - ...(input.Principal !== undefined && - input.Principal !== null && { Principal: serializeAws_json1_1DataLakePrincipal(input.Principal, context) }), - ...(input.Resource !== undefined && - input.Resource !== null && { Resource: serializeAws_json1_1Resource(input.Resource, context) }), - ...(input.ResourceType !== undefined && input.ResourceType !== null && { ResourceType: input.ResourceType }), - }; -}; - -const serializeAws_json1_1ListResourcesRequest = (input: ListResourcesRequest, context: __SerdeContext): any => { - return { - ...(input.FilterConditionList !== undefined && - input.FilterConditionList !== null && { - FilterConditionList: serializeAws_json1_1FilterConditionList(input.FilterConditionList, context), - }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - }; -}; - -const serializeAws_json1_1PermissionList = (input: (Permission | string)[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; 
- } - return entry; - }); -}; - -const serializeAws_json1_1PrincipalPermissions = (input: PrincipalPermissions, context: __SerdeContext): any => { - return { - ...(input.Permissions !== undefined && - input.Permissions !== null && { Permissions: serializeAws_json1_1PermissionList(input.Permissions, context) }), - ...(input.Principal !== undefined && - input.Principal !== null && { Principal: serializeAws_json1_1DataLakePrincipal(input.Principal, context) }), - }; -}; - -const serializeAws_json1_1PrincipalPermissionsList = (input: PrincipalPermissions[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return serializeAws_json1_1PrincipalPermissions(entry, context); - }); -}; - -const serializeAws_json1_1PutDataLakeSettingsRequest = ( - input: PutDataLakeSettingsRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.DataLakeSettings !== undefined && - input.DataLakeSettings !== null && { - DataLakeSettings: serializeAws_json1_1DataLakeSettings(input.DataLakeSettings, context), - }), - }; -}; - -const serializeAws_json1_1RegisterResourceRequest = (input: RegisterResourceRequest, context: __SerdeContext): any => { - return { - ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), - ...(input.RoleArn !== undefined && input.RoleArn !== null && { RoleArn: input.RoleArn }), - ...(input.UseServiceLinkedRole !== undefined && - input.UseServiceLinkedRole !== null && { UseServiceLinkedRole: input.UseServiceLinkedRole }), - }; -}; - -const serializeAws_json1_1RemoveLFTagsFromResourceRequest = ( - input: RemoveLFTagsFromResourceRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.LFTags !== undefined && - input.LFTags !== null && { LFTags: serializeAws_json1_1LFTagsList(input.LFTags, context) }), - ...(input.Resource !== undefined && - input.Resource !== null && { Resource: serializeAws_json1_1Resource(input.Resource, context) }), - }; -}; - -const serializeAws_json1_1Resource = (input: Resource, context: __SerdeContext): any => { - return { - ...(input.Catalog !== undefined && - input.Catalog !== null && { Catalog: serializeAws_json1_1CatalogResource(input.Catalog, context) }), - ...(input.DataLocation !== undefined && - input.DataLocation !== null && { - DataLocation: serializeAws_json1_1DataLocationResource(input.DataLocation, context), - }), - ...(input.Database !== undefined && - input.Database !== null && { Database: serializeAws_json1_1DatabaseResource(input.Database, context) }), - ...(input.LFTag !== undefined && - input.LFTag !== null && { LFTag: serializeAws_json1_1LFTagKeyResource(input.LFTag, context) }), - ...(input.LFTagPolicy !== undefined && - input.LFTagPolicy !== null && { - LFTagPolicy: serializeAws_json1_1LFTagPolicyResource(input.LFTagPolicy, context), - }), - ...(input.Table !== undefined && - input.Table !== null && { Table: serializeAws_json1_1TableResource(input.Table, context) }), - ...(input.TableWithColumns !== undefined && - input.TableWithColumns !== null && { - TableWithColumns: serializeAws_json1_1TableWithColumnsResource(input.TableWithColumns, context), - }), - }; -}; - -const serializeAws_json1_1RevokePermissionsRequest = ( - input: RevokePermissionsRequest, - context: 
__SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Permissions !== undefined && - input.Permissions !== null && { Permissions: serializeAws_json1_1PermissionList(input.Permissions, context) }), - ...(input.PermissionsWithGrantOption !== undefined && - input.PermissionsWithGrantOption !== null && { - PermissionsWithGrantOption: serializeAws_json1_1PermissionList(input.PermissionsWithGrantOption, context), - }), - ...(input.Principal !== undefined && - input.Principal !== null && { Principal: serializeAws_json1_1DataLakePrincipal(input.Principal, context) }), - ...(input.Resource !== undefined && - input.Resource !== null && { Resource: serializeAws_json1_1Resource(input.Resource, context) }), - }; -}; - -const serializeAws_json1_1SearchDatabasesByLFTagsRequest = ( - input: SearchDatabasesByLFTagsRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Expression !== undefined && - input.Expression !== null && { Expression: serializeAws_json1_1Expression(input.Expression, context) }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - }; -}; - -const serializeAws_json1_1SearchTablesByLFTagsRequest = ( - input: SearchTablesByLFTagsRequest, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.Expression !== undefined && - input.Expression !== null && { Expression: serializeAws_json1_1Expression(input.Expression, context) }), - ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), - ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), - }; -}; - -const serializeAws_json1_1StringValueList = (input: string[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return entry; - }); -}; - -const serializeAws_json1_1TableResource = (input: TableResource, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), - ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), - ...(input.TableWildcard !== undefined && - input.TableWildcard !== null && { - TableWildcard: serializeAws_json1_1TableWildcard(input.TableWildcard, context), - }), - }; -}; - -const serializeAws_json1_1TableWildcard = (input: TableWildcard, context: __SerdeContext): any => { - return {}; -}; - -const serializeAws_json1_1TableWithColumnsResource = ( - input: TableWithColumnsResource, - context: __SerdeContext -): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.ColumnNames !== undefined && - input.ColumnNames !== null && { ColumnNames: serializeAws_json1_1ColumnNames(input.ColumnNames, context) }), - ...(input.ColumnWildcard !== undefined && - input.ColumnWildcard !== null && { - ColumnWildcard: serializeAws_json1_1ColumnWildcard(input.ColumnWildcard, context), - }), - 
...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), - ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), - }; -}; - -const serializeAws_json1_1TagValueList = (input: string[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return entry; - }); -}; - -const serializeAws_json1_1TrustedResourceOwners = (input: string[], context: __SerdeContext): any => { - return input - .filter((e: any) => e != null) - .map((entry) => { - if (entry === null) { - return null as any; - } - return entry; - }); -}; - -const serializeAws_json1_1UpdateLFTagRequest = (input: UpdateLFTagRequest, context: __SerdeContext): any => { - return { - ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), - ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), - ...(input.TagValuesToAdd !== undefined && - input.TagValuesToAdd !== null && { - TagValuesToAdd: serializeAws_json1_1TagValueList(input.TagValuesToAdd, context), - }), - ...(input.TagValuesToDelete !== undefined && - input.TagValuesToDelete !== null && { - TagValuesToDelete: serializeAws_json1_1TagValueList(input.TagValuesToDelete, context), - }), - }; -}; - -const serializeAws_json1_1UpdateResourceRequest = (input: UpdateResourceRequest, context: __SerdeContext): any => { - return { - ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), - ...(input.RoleArn !== undefined && input.RoleArn !== null && { RoleArn: input.RoleArn }), - }; -}; - -const deserializeAws_json1_1AccessDeniedException = (output: any, context: __SerdeContext): AccessDeniedException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1AddLFTagsToResourceResponse = ( - output: any, - context: __SerdeContext -): AddLFTagsToResourceResponse => { - return { - Failures: - output.Failures !== undefined && output.Failures !== null - ? deserializeAws_json1_1LFTagErrors(output.Failures, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1AlreadyExistsException = (output: any, context: __SerdeContext): AlreadyExistsException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1BatchGrantPermissionsResponse = ( - output: any, - context: __SerdeContext -): BatchGrantPermissionsResponse => { - return { - Failures: - output.Failures !== undefined && output.Failures !== null - ? deserializeAws_json1_1BatchPermissionsFailureList(output.Failures, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1BatchPermissionsFailureEntry = ( - output: any, - context: __SerdeContext -): BatchPermissionsFailureEntry => { - return { - Error: - output.Error !== undefined && output.Error !== null - ? deserializeAws_json1_1ErrorDetail(output.Error, context) - : undefined, - RequestEntry: - output.RequestEntry !== undefined && output.RequestEntry !== null - ? 
deserializeAws_json1_1BatchPermissionsRequestEntry(output.RequestEntry, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1BatchPermissionsFailureList = ( - output: any, - context: __SerdeContext -): BatchPermissionsFailureEntry[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1BatchPermissionsFailureEntry(entry, context); - }); -}; - -const deserializeAws_json1_1BatchPermissionsRequestEntry = ( - output: any, - context: __SerdeContext -): BatchPermissionsRequestEntry => { - return { - Id: __expectString(output.Id), - Permissions: - output.Permissions !== undefined && output.Permissions !== null - ? deserializeAws_json1_1PermissionList(output.Permissions, context) - : undefined, - PermissionsWithGrantOption: - output.PermissionsWithGrantOption !== undefined && output.PermissionsWithGrantOption !== null - ? deserializeAws_json1_1PermissionList(output.PermissionsWithGrantOption, context) - : undefined, - Principal: - output.Principal !== undefined && output.Principal !== null - ? deserializeAws_json1_1DataLakePrincipal(output.Principal, context) - : undefined, - Resource: - output.Resource !== undefined && output.Resource !== null - ? deserializeAws_json1_1Resource(output.Resource, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1BatchRevokePermissionsResponse = ( - output: any, - context: __SerdeContext -): BatchRevokePermissionsResponse => { - return { - Failures: - output.Failures !== undefined && output.Failures !== null - ? deserializeAws_json1_1BatchPermissionsFailureList(output.Failures, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1CatalogResource = (output: any, context: __SerdeContext): CatalogResource => { - return {} as any; -}; - -const deserializeAws_json1_1ColumnLFTag = (output: any, context: __SerdeContext): ColumnLFTag => { - return { - LFTags: - output.LFTags !== undefined && output.LFTags !== null - ? deserializeAws_json1_1LFTagsList(output.LFTags, context) - : undefined, - Name: __expectString(output.Name), - } as any; -}; - -const deserializeAws_json1_1ColumnLFTagsList = (output: any, context: __SerdeContext): ColumnLFTag[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1ColumnLFTag(entry, context); - }); -}; - -const deserializeAws_json1_1ColumnNames = (output: any, context: __SerdeContext): string[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return __expectString(entry) as any; - }); -}; - -const deserializeAws_json1_1ColumnWildcard = (output: any, context: __SerdeContext): ColumnWildcard => { - return { - ExcludedColumnNames: - output.ExcludedColumnNames !== undefined && output.ExcludedColumnNames !== null - ? 
deserializeAws_json1_1ColumnNames(output.ExcludedColumnNames, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1ConcurrentModificationException = ( - output: any, - context: __SerdeContext -): ConcurrentModificationException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1CreateLFTagResponse = (output: any, context: __SerdeContext): CreateLFTagResponse => { - return {} as any; -}; - -const deserializeAws_json1_1DatabaseLFTagsList = (output: any, context: __SerdeContext): TaggedDatabase[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1TaggedDatabase(entry, context); - }); -}; - -const deserializeAws_json1_1DatabaseResource = (output: any, context: __SerdeContext): DatabaseResource => { - return { - CatalogId: __expectString(output.CatalogId), - Name: __expectString(output.Name), - } as any; -}; - -const deserializeAws_json1_1DataLakePrincipal = (output: any, context: __SerdeContext): DataLakePrincipal => { - return { - DataLakePrincipalIdentifier: __expectString(output.DataLakePrincipalIdentifier), - } as any; -}; - -const deserializeAws_json1_1DataLakePrincipalList = (output: any, context: __SerdeContext): DataLakePrincipal[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1DataLakePrincipal(entry, context); - }); -}; - -const deserializeAws_json1_1DataLakeSettings = (output: any, context: __SerdeContext): DataLakeSettings => { - return { - CreateDatabaseDefaultPermissions: - output.CreateDatabaseDefaultPermissions !== undefined && output.CreateDatabaseDefaultPermissions !== null - ? deserializeAws_json1_1PrincipalPermissionsList(output.CreateDatabaseDefaultPermissions, context) - : undefined, - CreateTableDefaultPermissions: - output.CreateTableDefaultPermissions !== undefined && output.CreateTableDefaultPermissions !== null - ? deserializeAws_json1_1PrincipalPermissionsList(output.CreateTableDefaultPermissions, context) - : undefined, - DataLakeAdmins: - output.DataLakeAdmins !== undefined && output.DataLakeAdmins !== null - ? deserializeAws_json1_1DataLakePrincipalList(output.DataLakeAdmins, context) - : undefined, - TrustedResourceOwners: - output.TrustedResourceOwners !== undefined && output.TrustedResourceOwners !== null - ? deserializeAws_json1_1TrustedResourceOwners(output.TrustedResourceOwners, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1DataLocationResource = (output: any, context: __SerdeContext): DataLocationResource => { - return { - CatalogId: __expectString(output.CatalogId), - ResourceArn: __expectString(output.ResourceArn), - } as any; -}; - -const deserializeAws_json1_1DeleteLFTagResponse = (output: any, context: __SerdeContext): DeleteLFTagResponse => { - return {} as any; -}; - -const deserializeAws_json1_1DeregisterResourceResponse = ( - output: any, - context: __SerdeContext -): DeregisterResourceResponse => { - return {} as any; -}; - -const deserializeAws_json1_1DescribeResourceResponse = ( - output: any, - context: __SerdeContext -): DescribeResourceResponse => { - return { - ResourceInfo: - output.ResourceInfo !== undefined && output.ResourceInfo !== null - ? 
deserializeAws_json1_1ResourceInfo(output.ResourceInfo, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1DetailsMap = (output: any, context: __SerdeContext): DetailsMap => { - return { - ResourceShare: - output.ResourceShare !== undefined && output.ResourceShare !== null - ? deserializeAws_json1_1ResourceShareList(output.ResourceShare, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1EntityNotFoundException = ( - output: any, - context: __SerdeContext -): EntityNotFoundException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1ErrorDetail = (output: any, context: __SerdeContext): ErrorDetail => { - return { - ErrorCode: __expectString(output.ErrorCode), - ErrorMessage: __expectString(output.ErrorMessage), - } as any; -}; - -const deserializeAws_json1_1Expression = (output: any, context: __SerdeContext): LFTag[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1LFTag(entry, context); - }); -}; - -const deserializeAws_json1_1GetDataLakeSettingsResponse = ( - output: any, - context: __SerdeContext -): GetDataLakeSettingsResponse => { - return { - DataLakeSettings: - output.DataLakeSettings !== undefined && output.DataLakeSettings !== null - ? deserializeAws_json1_1DataLakeSettings(output.DataLakeSettings, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1GetEffectivePermissionsForPathResponse = ( - output: any, - context: __SerdeContext -): GetEffectivePermissionsForPathResponse => { - return { - NextToken: __expectString(output.NextToken), - Permissions: - output.Permissions !== undefined && output.Permissions !== null - ? deserializeAws_json1_1PrincipalResourcePermissionsList(output.Permissions, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1GetLFTagResponse = (output: any, context: __SerdeContext): GetLFTagResponse => { - return { - CatalogId: __expectString(output.CatalogId), - TagKey: __expectString(output.TagKey), - TagValues: - output.TagValues !== undefined && output.TagValues !== null - ? deserializeAws_json1_1TagValueList(output.TagValues, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1GetResourceLFTagsResponse = ( - output: any, - context: __SerdeContext -): GetResourceLFTagsResponse => { - return { - LFTagOnDatabase: - output.LFTagOnDatabase !== undefined && output.LFTagOnDatabase !== null - ? deserializeAws_json1_1LFTagsList(output.LFTagOnDatabase, context) - : undefined, - LFTagsOnColumns: - output.LFTagsOnColumns !== undefined && output.LFTagsOnColumns !== null - ? deserializeAws_json1_1ColumnLFTagsList(output.LFTagsOnColumns, context) - : undefined, - LFTagsOnTable: - output.LFTagsOnTable !== undefined && output.LFTagsOnTable !== null - ? 
deserializeAws_json1_1LFTagsList(output.LFTagsOnTable, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1GlueEncryptionException = ( - output: any, - context: __SerdeContext -): GlueEncryptionException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1GrantPermissionsResponse = ( - output: any, - context: __SerdeContext -): GrantPermissionsResponse => { - return {} as any; -}; - -const deserializeAws_json1_1InternalServiceException = ( - output: any, - context: __SerdeContext -): InternalServiceException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1InvalidInputException = (output: any, context: __SerdeContext): InvalidInputException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1LFTag = (output: any, context: __SerdeContext): LFTag => { - return { - TagKey: __expectString(output.TagKey), - TagValues: - output.TagValues !== undefined && output.TagValues !== null - ? deserializeAws_json1_1TagValueList(output.TagValues, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1LFTagError = (output: any, context: __SerdeContext): LFTagError => { - return { - Error: - output.Error !== undefined && output.Error !== null - ? deserializeAws_json1_1ErrorDetail(output.Error, context) - : undefined, - LFTag: - output.LFTag !== undefined && output.LFTag !== null - ? deserializeAws_json1_1LFTagPair(output.LFTag, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1LFTagErrors = (output: any, context: __SerdeContext): LFTagError[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1LFTagError(entry, context); - }); -}; - -const deserializeAws_json1_1LFTagKeyResource = (output: any, context: __SerdeContext): LFTagKeyResource => { - return { - CatalogId: __expectString(output.CatalogId), - TagKey: __expectString(output.TagKey), - TagValues: - output.TagValues !== undefined && output.TagValues !== null - ? deserializeAws_json1_1TagValueList(output.TagValues, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1LFTagPair = (output: any, context: __SerdeContext): LFTagPair => { - return { - CatalogId: __expectString(output.CatalogId), - TagKey: __expectString(output.TagKey), - TagValues: - output.TagValues !== undefined && output.TagValues !== null - ? deserializeAws_json1_1TagValueList(output.TagValues, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1LFTagPolicyResource = (output: any, context: __SerdeContext): LFTagPolicyResource => { - return { - CatalogId: __expectString(output.CatalogId), - Expression: - output.Expression !== undefined && output.Expression !== null - ? deserializeAws_json1_1Expression(output.Expression, context) - : undefined, - ResourceType: __expectString(output.ResourceType), - } as any; -}; - -const deserializeAws_json1_1LFTagsList = (output: any, context: __SerdeContext): LFTagPair[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1LFTagPair(entry, context); - }); -}; - -const deserializeAws_json1_1ListLFTagsResponse = (output: any, context: __SerdeContext): ListLFTagsResponse => { - return { - LFTags: - output.LFTags !== undefined && output.LFTags !== null - ? 
deserializeAws_json1_1LFTagsList(output.LFTags, context) - : undefined, - NextToken: __expectString(output.NextToken), - } as any; -}; - -const deserializeAws_json1_1ListPermissionsResponse = ( - output: any, - context: __SerdeContext -): ListPermissionsResponse => { - return { - NextToken: __expectString(output.NextToken), - PrincipalResourcePermissions: - output.PrincipalResourcePermissions !== undefined && output.PrincipalResourcePermissions !== null - ? deserializeAws_json1_1PrincipalResourcePermissionsList(output.PrincipalResourcePermissions, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1ListResourcesResponse = (output: any, context: __SerdeContext): ListResourcesResponse => { - return { - NextToken: __expectString(output.NextToken), - ResourceInfoList: - output.ResourceInfoList !== undefined && output.ResourceInfoList !== null - ? deserializeAws_json1_1ResourceInfoList(output.ResourceInfoList, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1OperationTimeoutException = ( - output: any, - context: __SerdeContext -): OperationTimeoutException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1PermissionList = (output: any, context: __SerdeContext): (Permission | string)[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return __expectString(entry) as any; - }); -}; - -const deserializeAws_json1_1PrincipalPermissions = (output: any, context: __SerdeContext): PrincipalPermissions => { - return { - Permissions: - output.Permissions !== undefined && output.Permissions !== null - ? deserializeAws_json1_1PermissionList(output.Permissions, context) - : undefined, - Principal: - output.Principal !== undefined && output.Principal !== null - ? deserializeAws_json1_1DataLakePrincipal(output.Principal, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1PrincipalPermissionsList = ( - output: any, - context: __SerdeContext -): PrincipalPermissions[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1PrincipalPermissions(entry, context); - }); -}; - -const deserializeAws_json1_1PrincipalResourcePermissions = ( - output: any, - context: __SerdeContext -): PrincipalResourcePermissions => { - return { - AdditionalDetails: - output.AdditionalDetails !== undefined && output.AdditionalDetails !== null - ? deserializeAws_json1_1DetailsMap(output.AdditionalDetails, context) - : undefined, - Permissions: - output.Permissions !== undefined && output.Permissions !== null - ? deserializeAws_json1_1PermissionList(output.Permissions, context) - : undefined, - PermissionsWithGrantOption: - output.PermissionsWithGrantOption !== undefined && output.PermissionsWithGrantOption !== null - ? deserializeAws_json1_1PermissionList(output.PermissionsWithGrantOption, context) - : undefined, - Principal: - output.Principal !== undefined && output.Principal !== null - ? deserializeAws_json1_1DataLakePrincipal(output.Principal, context) - : undefined, - Resource: - output.Resource !== undefined && output.Resource !== null - ? 
deserializeAws_json1_1Resource(output.Resource, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1PrincipalResourcePermissionsList = ( - output: any, - context: __SerdeContext -): PrincipalResourcePermissions[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1PrincipalResourcePermissions(entry, context); - }); -}; - -const deserializeAws_json1_1PutDataLakeSettingsResponse = ( - output: any, - context: __SerdeContext -): PutDataLakeSettingsResponse => { - return {} as any; -}; - -const deserializeAws_json1_1RegisterResourceResponse = ( - output: any, - context: __SerdeContext -): RegisterResourceResponse => { - return {} as any; -}; - -const deserializeAws_json1_1RemoveLFTagsFromResourceResponse = ( - output: any, - context: __SerdeContext -): RemoveLFTagsFromResourceResponse => { - return { - Failures: - output.Failures !== undefined && output.Failures !== null - ? deserializeAws_json1_1LFTagErrors(output.Failures, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1Resource = (output: any, context: __SerdeContext): Resource => { - return { - Catalog: - output.Catalog !== undefined && output.Catalog !== null - ? deserializeAws_json1_1CatalogResource(output.Catalog, context) - : undefined, - DataLocation: - output.DataLocation !== undefined && output.DataLocation !== null - ? deserializeAws_json1_1DataLocationResource(output.DataLocation, context) - : undefined, - Database: - output.Database !== undefined && output.Database !== null - ? deserializeAws_json1_1DatabaseResource(output.Database, context) - : undefined, - LFTag: - output.LFTag !== undefined && output.LFTag !== null - ? deserializeAws_json1_1LFTagKeyResource(output.LFTag, context) - : undefined, - LFTagPolicy: - output.LFTagPolicy !== undefined && output.LFTagPolicy !== null - ? deserializeAws_json1_1LFTagPolicyResource(output.LFTagPolicy, context) - : undefined, - Table: - output.Table !== undefined && output.Table !== null - ? deserializeAws_json1_1TableResource(output.Table, context) - : undefined, - TableWithColumns: - output.TableWithColumns !== undefined && output.TableWithColumns !== null - ? deserializeAws_json1_1TableWithColumnsResource(output.TableWithColumns, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1ResourceInfo = (output: any, context: __SerdeContext): ResourceInfo => { - return { - LastModified: - output.LastModified !== undefined && output.LastModified !== null - ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastModified))) - : undefined, - ResourceArn: __expectString(output.ResourceArn), - RoleArn: __expectString(output.RoleArn), - } as any; -}; - -const deserializeAws_json1_1ResourceInfoList = (output: any, context: __SerdeContext): ResourceInfo[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1ResourceInfo(entry, context); - }); -}; - -const deserializeAws_json1_1ResourceNumberLimitExceededException = ( - output: any, - context: __SerdeContext -): ResourceNumberLimitExceededException => { - return { - Message: __expectString(output.Message), - } as any; -}; - -const deserializeAws_json1_1ResourceShareList = (output: any, context: __SerdeContext): string[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return __expectString(entry) as any; - }); -}; - -const deserializeAws_json1_1RevokePermissionsResponse = ( - output: any, - context: __SerdeContext -): RevokePermissionsResponse => { - return {} as any; -}; - -const deserializeAws_json1_1SearchDatabasesByLFTagsResponse = ( - output: any, - context: __SerdeContext -): SearchDatabasesByLFTagsResponse => { - return { - DatabaseList: - output.DatabaseList !== undefined && output.DatabaseList !== null - ? deserializeAws_json1_1DatabaseLFTagsList(output.DatabaseList, context) - : undefined, - NextToken: __expectString(output.NextToken), - } as any; -}; - -const deserializeAws_json1_1SearchTablesByLFTagsResponse = ( - output: any, - context: __SerdeContext -): SearchTablesByLFTagsResponse => { - return { - NextToken: __expectString(output.NextToken), - TableList: - output.TableList !== undefined && output.TableList !== null - ? deserializeAws_json1_1TableLFTagsList(output.TableList, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1TableLFTagsList = (output: any, context: __SerdeContext): TaggedTable[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return deserializeAws_json1_1TaggedTable(entry, context); - }); -}; - -const deserializeAws_json1_1TableResource = (output: any, context: __SerdeContext): TableResource => { - return { - CatalogId: __expectString(output.CatalogId), - DatabaseName: __expectString(output.DatabaseName), - Name: __expectString(output.Name), - TableWildcard: - output.TableWildcard !== undefined && output.TableWildcard !== null - ? deserializeAws_json1_1TableWildcard(output.TableWildcard, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1TableWildcard = (output: any, context: __SerdeContext): TableWildcard => { - return {} as any; -}; - -const deserializeAws_json1_1TableWithColumnsResource = ( - output: any, - context: __SerdeContext -): TableWithColumnsResource => { - return { - CatalogId: __expectString(output.CatalogId), - ColumnNames: - output.ColumnNames !== undefined && output.ColumnNames !== null - ? deserializeAws_json1_1ColumnNames(output.ColumnNames, context) - : undefined, - ColumnWildcard: - output.ColumnWildcard !== undefined && output.ColumnWildcard !== null - ? 
deserializeAws_json1_1ColumnWildcard(output.ColumnWildcard, context) - : undefined, - DatabaseName: __expectString(output.DatabaseName), - Name: __expectString(output.Name), - } as any; -}; - -const deserializeAws_json1_1TaggedDatabase = (output: any, context: __SerdeContext): TaggedDatabase => { - return { - Database: - output.Database !== undefined && output.Database !== null - ? deserializeAws_json1_1DatabaseResource(output.Database, context) - : undefined, - LFTags: - output.LFTags !== undefined && output.LFTags !== null - ? deserializeAws_json1_1LFTagsList(output.LFTags, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1TaggedTable = (output: any, context: __SerdeContext): TaggedTable => { - return { - LFTagOnDatabase: - output.LFTagOnDatabase !== undefined && output.LFTagOnDatabase !== null - ? deserializeAws_json1_1LFTagsList(output.LFTagOnDatabase, context) - : undefined, - LFTagsOnColumns: - output.LFTagsOnColumns !== undefined && output.LFTagsOnColumns !== null - ? deserializeAws_json1_1ColumnLFTagsList(output.LFTagsOnColumns, context) - : undefined, - LFTagsOnTable: - output.LFTagsOnTable !== undefined && output.LFTagsOnTable !== null - ? deserializeAws_json1_1LFTagsList(output.LFTagsOnTable, context) - : undefined, - Table: - output.Table !== undefined && output.Table !== null - ? deserializeAws_json1_1TableResource(output.Table, context) - : undefined, - } as any; -}; - -const deserializeAws_json1_1TagValueList = (output: any, context: __SerdeContext): string[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return __expectString(entry) as any; - }); -}; - -const deserializeAws_json1_1TrustedResourceOwners = (output: any, context: __SerdeContext): string[] => { - return (output || []) - .filter((e: any) => e != null) - .map((entry: any) => { - if (entry === null) { - return null as any; - } - return __expectString(entry) as any; - }); -}; - -const deserializeAws_json1_1UpdateLFTagResponse = (output: any, context: __SerdeContext): UpdateLFTagResponse => { - return {} as any; -}; - -const deserializeAws_json1_1UpdateResourceResponse = (output: any, context: __SerdeContext): UpdateResourceResponse => { - return {} as any; -}; - -const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ - httpStatusCode: output.statusCode, - requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], - extendedRequestId: output.headers["x-amz-id-2"], - cfId: output.headers["x-amz-cf-id"], -}); - -// Collect low-level response body stream to Uint8Array. -const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { - if (streamBody instanceof Uint8Array) { - return Promise.resolve(streamBody); - } - return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); -}; - -// Encode Uint8Array data into string with utf-8. -const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => - collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); - -const buildHttpRpcRequest = async ( - context: __SerdeContext, - headers: __HeaderBag, - path: string, - resolvedHostname: string | undefined, - body: any -): Promise<__HttpRequest> => { - const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); - const contents: any = { - protocol, - hostname, - port, - method: "POST", - path: basePath.endsWith("/") ? 
basePath.slice(0, -1) + path : basePath + path, - headers, - }; - if (resolvedHostname !== undefined) { - contents.hostname = resolvedHostname; - } - if (body !== undefined) { - contents.body = body; - } - return new __HttpRequest(contents); -}; - -const parseBody = (streamBody: any, context: __SerdeContext): any => - collectBodyString(streamBody, context).then((encoded) => { - if (encoded.length) { - return JSON.parse(encoded); - } - return {}; - }); - -/** - * Load an error code for the aws.rest-json-1.1 protocol. - */ -const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { - const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); - - const sanitizeErrorCode = (rawValue: string): string => { - let cleanValue = rawValue; - if (cleanValue.indexOf(":") >= 0) { - cleanValue = cleanValue.split(":")[0]; - } - if (cleanValue.indexOf("#") >= 0) { - cleanValue = cleanValue.split("#")[1]; - } - return cleanValue; - }; - - const headerKey = findKey(output.headers, "x-amzn-errortype"); - if (headerKey !== undefined) { - return sanitizeErrorCode(output.headers[headerKey]); - } - - if (data.code !== undefined) { - return sanitizeErrorCode(data.code); - } - - if (data["__type"] !== undefined) { - return sanitizeErrorCode(data["__type"]); - } - - return ""; -}; diff --git a/clients/client-lakeformation/src/protocols/Aws_restJson1.ts b/clients/client-lakeformation/src/protocols/Aws_restJson1.ts new file mode 100644 index 000000000000..37ae98700552 --- /dev/null +++ b/clients/client-lakeformation/src/protocols/Aws_restJson1.ts @@ -0,0 +1,6580 @@ +import { + HttpRequest as __HttpRequest, + HttpResponse as __HttpResponse, + isValidHostname as __isValidHostname, +} from "@aws-sdk/protocol-http"; +import { + expectLong as __expectLong, + expectNonNull as __expectNonNull, + expectNumber as __expectNumber, + expectObject as __expectObject, + expectString as __expectString, + parseEpochTimestamp as __parseEpochTimestamp, + parseRfc3339DateTime as __parseRfc3339DateTime, +} from "@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + MetadataBearer as __MetadataBearer, + ResponseMetadata as __ResponseMetadata, + SerdeContext as __SerdeContext, + SmithyException as __SmithyException, +} from "@aws-sdk/types"; + +import { + AddLFTagsToResourceCommandInput, + AddLFTagsToResourceCommandOutput, +} from "../commands/AddLFTagsToResourceCommand"; +import { + BatchGrantPermissionsCommandInput, + BatchGrantPermissionsCommandOutput, +} from "../commands/BatchGrantPermissionsCommand"; +import { + BatchRevokePermissionsCommandInput, + BatchRevokePermissionsCommandOutput, +} from "../commands/BatchRevokePermissionsCommand"; +import { CancelTransactionCommandInput, CancelTransactionCommandOutput } from "../commands/CancelTransactionCommand"; +import { CommitTransactionCommandInput, CommitTransactionCommandOutput } from "../commands/CommitTransactionCommand"; +import { + CreateDataCellsFilterCommandInput, + CreateDataCellsFilterCommandOutput, +} from "../commands/CreateDataCellsFilterCommand"; +import { CreateLFTagCommandInput, CreateLFTagCommandOutput } from "../commands/CreateLFTagCommand"; +import { + DeleteDataCellsFilterCommandInput, + DeleteDataCellsFilterCommandOutput, +} from "../commands/DeleteDataCellsFilterCommand"; +import { DeleteLFTagCommandInput, DeleteLFTagCommandOutput } from "../commands/DeleteLFTagCommand"; +import { + DeleteObjectsOnCancelCommandInput, + DeleteObjectsOnCancelCommandOutput, +} from 
"../commands/DeleteObjectsOnCancelCommand"; +import { DeregisterResourceCommandInput, DeregisterResourceCommandOutput } from "../commands/DeregisterResourceCommand"; +import { DescribeResourceCommandInput, DescribeResourceCommandOutput } from "../commands/DescribeResourceCommand"; +import { + DescribeTransactionCommandInput, + DescribeTransactionCommandOutput, +} from "../commands/DescribeTransactionCommand"; +import { ExtendTransactionCommandInput, ExtendTransactionCommandOutput } from "../commands/ExtendTransactionCommand"; +import { + GetDataLakeSettingsCommandInput, + GetDataLakeSettingsCommandOutput, +} from "../commands/GetDataLakeSettingsCommand"; +import { + GetEffectivePermissionsForPathCommandInput, + GetEffectivePermissionsForPathCommandOutput, +} from "../commands/GetEffectivePermissionsForPathCommand"; +import { GetLFTagCommandInput, GetLFTagCommandOutput } from "../commands/GetLFTagCommand"; +import { GetQueryStateCommandInput, GetQueryStateCommandOutput } from "../commands/GetQueryStateCommand"; +import { GetQueryStatisticsCommandInput, GetQueryStatisticsCommandOutput } from "../commands/GetQueryStatisticsCommand"; +import { GetResourceLFTagsCommandInput, GetResourceLFTagsCommandOutput } from "../commands/GetResourceLFTagsCommand"; +import { GetTableObjectsCommandInput, GetTableObjectsCommandOutput } from "../commands/GetTableObjectsCommand"; +import { GetWorkUnitResultsCommandInput, GetWorkUnitResultsCommandOutput } from "../commands/GetWorkUnitResultsCommand"; +import { GetWorkUnitsCommandInput, GetWorkUnitsCommandOutput } from "../commands/GetWorkUnitsCommand"; +import { GrantPermissionsCommandInput, GrantPermissionsCommandOutput } from "../commands/GrantPermissionsCommand"; +import { + ListDataCellsFilterCommandInput, + ListDataCellsFilterCommandOutput, +} from "../commands/ListDataCellsFilterCommand"; +import { ListLFTagsCommandInput, ListLFTagsCommandOutput } from "../commands/ListLFTagsCommand"; +import { ListPermissionsCommandInput, ListPermissionsCommandOutput } from "../commands/ListPermissionsCommand"; +import { ListResourcesCommandInput, ListResourcesCommandOutput } from "../commands/ListResourcesCommand"; +import { + ListTableStorageOptimizersCommandInput, + ListTableStorageOptimizersCommandOutput, +} from "../commands/ListTableStorageOptimizersCommand"; +import { ListTransactionsCommandInput, ListTransactionsCommandOutput } from "../commands/ListTransactionsCommand"; +import { + PutDataLakeSettingsCommandInput, + PutDataLakeSettingsCommandOutput, +} from "../commands/PutDataLakeSettingsCommand"; +import { RegisterResourceCommandInput, RegisterResourceCommandOutput } from "../commands/RegisterResourceCommand"; +import { + RemoveLFTagsFromResourceCommandInput, + RemoveLFTagsFromResourceCommandOutput, +} from "../commands/RemoveLFTagsFromResourceCommand"; +import { RevokePermissionsCommandInput, RevokePermissionsCommandOutput } from "../commands/RevokePermissionsCommand"; +import { + SearchDatabasesByLFTagsCommandInput, + SearchDatabasesByLFTagsCommandOutput, +} from "../commands/SearchDatabasesByLFTagsCommand"; +import { + SearchTablesByLFTagsCommandInput, + SearchTablesByLFTagsCommandOutput, +} from "../commands/SearchTablesByLFTagsCommand"; +import { StartQueryPlanningCommandInput, StartQueryPlanningCommandOutput } from "../commands/StartQueryPlanningCommand"; +import { StartTransactionCommandInput, StartTransactionCommandOutput } from "../commands/StartTransactionCommand"; +import { UpdateLFTagCommandInput, UpdateLFTagCommandOutput } from 
"../commands/UpdateLFTagCommand"; +import { UpdateResourceCommandInput, UpdateResourceCommandOutput } from "../commands/UpdateResourceCommand"; +import { UpdateTableObjectsCommandInput, UpdateTableObjectsCommandOutput } from "../commands/UpdateTableObjectsCommand"; +import { + UpdateTableStorageOptimizerCommandInput, + UpdateTableStorageOptimizerCommandOutput, +} from "../commands/UpdateTableStorageOptimizerCommand"; +import { + AccessDeniedException, + AddObjectInput, + AllRowsWildcard, + AlreadyExistsException, + BatchPermissionsFailureEntry, + BatchPermissionsRequestEntry, + CatalogResource, + ColumnLFTag, + ColumnWildcard, + ConcurrentModificationException, + DatabaseResource, + DataCellsFilter, + DataCellsFilterResource, + DataLakePrincipal, + DataLakeSettings, + DataLocationResource, + DeleteObjectInput, + DetailsMap, + EntityNotFoundException, + ErrorDetail, + ExecutionStatistics, + ExpiredException, + FilterCondition, + GlueEncryptionException, + InternalServiceException, + InvalidInputException, + LFTag, + LFTagError, + LFTagKeyResource, + LFTagPair, + LFTagPolicyResource, + OperationTimeoutException, + OptimizerType, + PartitionObjects, + Permission, + PlanningStatistics, + PrincipalPermissions, + PrincipalResourcePermissions, + QueryPlanningContext, + Resource, + ResourceInfo, + ResourceNotReadyException, + ResourceNumberLimitExceededException, + RowFilter, + StatisticsNotReadyYetException, + StorageOptimizer, + TableObject, + TableResource, + TableWildcard, + TableWithColumnsResource, + TaggedDatabase, + TaggedTable, + ThrottledException, + TransactionCanceledException, + TransactionCommitInProgressException, + TransactionCommittedException, + TransactionDescription, + VirtualObject, + WorkUnitRange, + WorkUnitsNotReadyYetException, + WriteOperation, +} from "../models/models_0"; + +export const serializeAws_restJson1AddLFTagsToResourceCommand = async ( + input: AddLFTagsToResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/AddLFTagsToResource"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.LFTags !== undefined && + input.LFTags !== null && { LFTags: serializeAws_restJson1LFTagsList(input.LFTags, context) }), + ...(input.Resource !== undefined && + input.Resource !== null && { Resource: serializeAws_restJson1Resource(input.Resource, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1BatchGrantPermissionsCommand = async ( + input: BatchGrantPermissionsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/BatchGrantPermissions"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Entries !== undefined && + input.Entries !== null && { + Entries: serializeAws_restJson1BatchPermissionsRequestEntryList(input.Entries, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1BatchRevokePermissionsCommand = async ( + input: BatchRevokePermissionsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/BatchRevokePermissions"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Entries !== undefined && + input.Entries !== null && { + Entries: serializeAws_restJson1BatchPermissionsRequestEntryList(input.Entries, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CancelTransactionCommand = async ( + input: CancelTransactionCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/CancelTransaction"; + let body: any; + body = JSON.stringify({ + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CommitTransactionCommand = async ( + input: CommitTransactionCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/CommitTransaction"; + let body: any; + body = JSON.stringify({ + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateDataCellsFilterCommand = async ( + input: CreateDataCellsFilterCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/CreateDataCellsFilter"; + let body: any; + body = JSON.stringify({ + ...(input.TableData !== undefined && + input.TableData !== null && { TableData: serializeAws_restJson1DataCellsFilter(input.TableData, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateLFTagCommand = async ( + input: CreateLFTagCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/CreateLFTag"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), + ...(input.TagValues !== undefined && + input.TagValues !== null && { TagValues: serializeAws_restJson1TagValueList(input.TagValues, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteDataCellsFilterCommand = async ( + input: DeleteDataCellsFilterCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/DeleteDataCellsFilter"; + let body: any; + body = JSON.stringify({ + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.TableCatalogId !== undefined && + input.TableCatalogId !== null && { TableCatalogId: input.TableCatalogId }), + ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteLFTagCommand = async ( + input: DeleteLFTagCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/DeleteLFTag"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteObjectsOnCancelCommand = async ( + input: DeleteObjectsOnCancelCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/DeleteObjectsOnCancel"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.Objects !== undefined && + input.Objects !== null && { Objects: serializeAws_restJson1VirtualObjectList(input.Objects, context) }), + ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeregisterResourceCommand = async ( + input: DeregisterResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/DeregisterResource"; + let body: any; + body = JSON.stringify({ + ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DescribeResourceCommand = async ( + input: DescribeResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/DescribeResource"; + let body: any; + body = JSON.stringify({ + ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DescribeTransactionCommand = async ( + input: DescribeTransactionCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/DescribeTransaction"; + let body: any; + body = JSON.stringify({ + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ExtendTransactionCommand = async ( + input: ExtendTransactionCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/ExtendTransaction"; + let body: any; + body = JSON.stringify({ + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetDataLakeSettingsCommand = async ( + input: GetDataLakeSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/GetDataLakeSettings"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetEffectivePermissionsForPathCommand = async ( + input: GetEffectivePermissionsForPathCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/GetEffectivePermissionsForPath"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetLFTagCommand = async ( + input: GetLFTagCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/GetLFTag"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetQueryStateCommand = async ( + input: GetQueryStateCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/GetQueryState"; + let body: any; + body = JSON.stringify({ + ...(input.QueryId !== undefined && input.QueryId !== null && { QueryId: input.QueryId }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "query-" + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetQueryStatisticsCommand = async ( + input: GetQueryStatisticsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/GetQueryStatistics"; + let body: any; + body = JSON.stringify({ + ...(input.QueryId !== undefined && input.QueryId !== null && { QueryId: input.QueryId }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "query-" + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetResourceLFTagsCommand = async ( + input: GetResourceLFTagsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/GetResourceLFTags"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Resource !== undefined && + input.Resource !== null && { Resource: serializeAws_restJson1Resource(input.Resource, context) }), + ...(input.ShowAssignedLFTags !== undefined && + input.ShowAssignedLFTags !== null && { ShowAssignedLFTags: input.ShowAssignedLFTags }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetTableObjectsCommand = async ( + input: GetTableObjectsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/GetTableObjects"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.PartitionPredicate !== undefined && + input.PartitionPredicate !== null && { PartitionPredicate: input.PartitionPredicate }), + ...(input.QueryAsOfTime !== undefined && + input.QueryAsOfTime !== null && { QueryAsOfTime: Math.round(input.QueryAsOfTime.getTime() / 1000) }), + ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetWorkUnitResultsCommand = async ( + input: GetWorkUnitResultsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/GetWorkUnitResults"; + let body: any; + body = JSON.stringify({ + ...(input.QueryId !== undefined && input.QueryId !== null && { QueryId: input.QueryId }), + ...(input.WorkUnitId !== undefined && input.WorkUnitId !== null && { WorkUnitId: input.WorkUnitId }), + ...(input.WorkUnitToken !== undefined && input.WorkUnitToken !== null && { WorkUnitToken: input.WorkUnitToken }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "data-" + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetWorkUnitsCommand = async ( + input: GetWorkUnitsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/GetWorkUnits"; + let body: any; + body = JSON.stringify({ + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.PageSize !== undefined && input.PageSize !== null && { PageSize: input.PageSize }), + ...(input.QueryId !== undefined && input.QueryId !== null && { QueryId: input.QueryId }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "query-" + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GrantPermissionsCommand = async ( + input: GrantPermissionsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/GrantPermissions"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Permissions !== undefined && + input.Permissions !== null && { Permissions: serializeAws_restJson1PermissionList(input.Permissions, context) }), + ...(input.PermissionsWithGrantOption !== undefined && + input.PermissionsWithGrantOption !== null && { + PermissionsWithGrantOption: serializeAws_restJson1PermissionList(input.PermissionsWithGrantOption, context), + }), + ...(input.Principal !== undefined && + input.Principal !== null && { Principal: serializeAws_restJson1DataLakePrincipal(input.Principal, context) }), + ...(input.Resource !== undefined && + input.Resource !== null && { Resource: serializeAws_restJson1Resource(input.Resource, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListDataCellsFilterCommand = async ( + input: ListDataCellsFilterCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/ListDataCellsFilter"; + let body: any; + body = JSON.stringify({ + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.Table !== undefined && + input.Table !== null && { Table: serializeAws_restJson1TableResource(input.Table, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListLFTagsCommand = async ( + input: ListLFTagsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/ListLFTags"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.ResourceShareType !== undefined && + input.ResourceShareType !== null && { ResourceShareType: input.ResourceShareType }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListPermissionsCommand = async ( + input: ListPermissionsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/ListPermissions"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.IncludeRelated !== undefined && + input.IncludeRelated !== null && { IncludeRelated: input.IncludeRelated }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.Principal !== undefined && + input.Principal !== null && { Principal: serializeAws_restJson1DataLakePrincipal(input.Principal, context) }), + ...(input.Resource !== undefined && + input.Resource !== null && { Resource: serializeAws_restJson1Resource(input.Resource, context) }), + ...(input.ResourceType !== undefined && input.ResourceType !== null && { ResourceType: input.ResourceType }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListResourcesCommand = async ( + input: ListResourcesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/ListResources"; + let body: any; + body = JSON.stringify({ + ...(input.FilterConditionList !== undefined && + input.FilterConditionList !== null && { + FilterConditionList: serializeAws_restJson1FilterConditionList(input.FilterConditionList, context), + }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListTableStorageOptimizersCommand = async ( + input: ListTableStorageOptimizersCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/ListTableStorageOptimizers"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.StorageOptimizerType !== undefined && + input.StorageOptimizerType !== null && { StorageOptimizerType: input.StorageOptimizerType }), + ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListTransactionsCommand = async ( + input: ListTransactionsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/ListTransactions"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.StatusFilter !== undefined && input.StatusFilter !== null && { StatusFilter: input.StatusFilter }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1PutDataLakeSettingsCommand = async ( + input: PutDataLakeSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/PutDataLakeSettings"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DataLakeSettings !== undefined && + input.DataLakeSettings !== null && { + DataLakeSettings: serializeAws_restJson1DataLakeSettings(input.DataLakeSettings, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1RegisterResourceCommand = async ( + input: RegisterResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/RegisterResource"; + let body: any; + body = JSON.stringify({ + ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), + ...(input.RoleArn !== undefined && input.RoleArn !== null && { RoleArn: input.RoleArn }), + ...(input.UseServiceLinkedRole !== undefined && + input.UseServiceLinkedRole !== null && { UseServiceLinkedRole: input.UseServiceLinkedRole }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1RemoveLFTagsFromResourceCommand = async ( + input: RemoveLFTagsFromResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/RemoveLFTagsFromResource"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.LFTags !== undefined && + input.LFTags !== null && { LFTags: serializeAws_restJson1LFTagsList(input.LFTags, context) }), + ...(input.Resource !== undefined && + input.Resource !== null && { Resource: serializeAws_restJson1Resource(input.Resource, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1RevokePermissionsCommand = async ( + input: RevokePermissionsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/RevokePermissions"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Permissions !== undefined && + input.Permissions !== null && { Permissions: serializeAws_restJson1PermissionList(input.Permissions, context) }), + ...(input.PermissionsWithGrantOption !== undefined && + input.PermissionsWithGrantOption !== null && { + PermissionsWithGrantOption: serializeAws_restJson1PermissionList(input.PermissionsWithGrantOption, context), + }), + ...(input.Principal !== undefined && + input.Principal !== null && { Principal: serializeAws_restJson1DataLakePrincipal(input.Principal, context) }), + ...(input.Resource !== undefined && + input.Resource !== null && { Resource: serializeAws_restJson1Resource(input.Resource, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1SearchDatabasesByLFTagsCommand = async ( + input: SearchDatabasesByLFTagsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/SearchDatabasesByLFTags"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Expression !== undefined && + input.Expression !== null && { Expression: serializeAws_restJson1Expression(input.Expression, context) }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1SearchTablesByLFTagsCommand = async ( + input: SearchTablesByLFTagsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/SearchTablesByLFTags"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Expression !== undefined && + input.Expression !== null && { Expression: serializeAws_restJson1Expression(input.Expression, context) }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1StartQueryPlanningCommand = async ( + input: StartQueryPlanningCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/StartQueryPlanning"; + let body: any; + body = JSON.stringify({ + ...(input.QueryPlanningContext !== undefined && + input.QueryPlanningContext !== null && { + QueryPlanningContext: serializeAws_restJson1QueryPlanningContext(input.QueryPlanningContext, context), + }), + ...(input.QueryString !== undefined && input.QueryString !== null && { QueryString: input.QueryString }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "query-" + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1StartTransactionCommand = async ( + input: StartTransactionCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/StartTransaction"; + let body: any; + body = JSON.stringify({ + ...(input.TransactionType !== undefined && + input.TransactionType !== null && { TransactionType: input.TransactionType }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateLFTagCommand = async ( + input: UpdateLFTagCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/UpdateLFTag"; + let body: any; + body = JSON.stringify({ + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), + ...(input.TagValuesToAdd !== undefined && + input.TagValuesToAdd !== null && { + TagValuesToAdd: serializeAws_restJson1TagValueList(input.TagValuesToAdd, context), + }), + ...(input.TagValuesToDelete !== undefined && + input.TagValuesToDelete !== null && { + TagValuesToDelete: serializeAws_restJson1TagValueList(input.TagValuesToDelete, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateResourceCommand = async ( + input: UpdateResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/UpdateResource"; + let body: any; + body = JSON.stringify({ + ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), + ...(input.RoleArn !== undefined && input.RoleArn !== null && { RoleArn: input.RoleArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateTableObjectsCommand = async ( + input: UpdateTableObjectsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/UpdateTableObjects";
+  let body: any;
+  body = JSON.stringify({
+    ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }),
+    ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }),
+    ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }),
+    ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }),
+    ...(input.WriteOperations !== undefined &&
+      input.WriteOperations !== null && {
+        WriteOperations: serializeAws_restJson1WriteOperationList(input.WriteOperations, context),
+      }),
+  });
+  return new __HttpRequest({
+    protocol,
+    hostname,
+    port,
+    method: "POST",
+    headers,
+    path: resolvedPath,
+    body,
+  });
+};
+
+export const serializeAws_restJson1UpdateTableStorageOptimizerCommand = async (
+  input: UpdateTableStorageOptimizerCommandInput,
+  context: __SerdeContext
+): Promise<__HttpRequest> => {
+  const { hostname, protocol = "https", port, path: basePath } = await context.endpoint();
+  const headers: any = {
+    "content-type": "application/json",
+  };
+  const resolvedPath =
+    `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/UpdateTableStorageOptimizer";
+  let body: any;
+  body = JSON.stringify({
+    ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }),
+    ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }),
+    ...(input.StorageOptimizerConfig !== undefined &&
+      input.StorageOptimizerConfig !== null && {
+        StorageOptimizerConfig: serializeAws_restJson1StorageOptimizerConfigMap(input.StorageOptimizerConfig, context),
+      }),
+    ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }),
+  });
+  return new __HttpRequest({
+    protocol,
+    hostname,
+    port,
+    method: "POST",
+    headers,
+    path: resolvedPath,
+    body,
+  });
+};
+
+export const deserializeAws_restJson1AddLFTagsToResourceCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<AddLFTagsToResourceCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1AddLFTagsToResourceCommandError(output, context);
+  }
+  const contents: AddLFTagsToResourceCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    Failures: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.Failures !== undefined && data.Failures !== null) {
+    contents.Failures = deserializeAws_restJson1LFTagErrors(data.Failures, context);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1AddLFTagsToResourceCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<AddLFTagsToResourceCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.lakeformation#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ConcurrentModificationException":
+    case "com.amazonaws.lakeformation#ConcurrentModificationException":
+      response = {
+        ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "EntityNotFoundException":
+    case "com.amazonaws.lakeformation#EntityNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServiceException":
+    case "com.amazonaws.lakeformation#InternalServiceException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InvalidInputException":
+    case "com.amazonaws.lakeformation#InvalidInputException":
+      response = {
+        ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "OperationTimeoutException":
+    case "com.amazonaws.lakeformation#OperationTimeoutException":
+      response = {
+        ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1BatchGrantPermissionsCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<BatchGrantPermissionsCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1BatchGrantPermissionsCommandError(output, context);
+  }
+  const contents: BatchGrantPermissionsCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    Failures: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.Failures !== undefined && data.Failures !== null) {
+    contents.Failures = deserializeAws_restJson1BatchPermissionsFailureList(data.Failures, context);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1BatchGrantPermissionsCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<BatchGrantPermissionsCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "InvalidInputException":
+    case "com.amazonaws.lakeformation#InvalidInputException":
+      response = {
+        ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "OperationTimeoutException":
+    case "com.amazonaws.lakeformation#OperationTimeoutException":
+      response = {
+        ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1BatchRevokePermissionsCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<BatchRevokePermissionsCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1BatchRevokePermissionsCommandError(output, context);
+  }
+  const contents: BatchRevokePermissionsCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    Failures: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.Failures !== undefined && data.Failures !== null) {
+    contents.Failures = deserializeAws_restJson1BatchPermissionsFailureList(data.Failures, context);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1BatchRevokePermissionsCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<BatchRevokePermissionsCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "InvalidInputException":
+    case "com.amazonaws.lakeformation#InvalidInputException":
+      response = {
+        ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "OperationTimeoutException":
+    case "com.amazonaws.lakeformation#OperationTimeoutException":
+      response = {
+        ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CancelTransactionCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CancelTransactionCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CancelTransactionCommandError(output, context);
+  }
+  const contents: CancelTransactionCommandOutput = {
+    $metadata: deserializeMetadata(output),
+  };
+  await collectBody(output.body, context);
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CancelTransactionCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CancelTransactionCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "ConcurrentModificationException":
+    case "com.amazonaws.lakeformation#ConcurrentModificationException":
+      response = {
+        ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "EntityNotFoundException":
+    case "com.amazonaws.lakeformation#EntityNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServiceException":
+    case "com.amazonaws.lakeformation#InternalServiceException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InvalidInputException":
+    case "com.amazonaws.lakeformation#InvalidInputException":
+      response = {
+        ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "OperationTimeoutException":
+    case "com.amazonaws.lakeformation#OperationTimeoutException":
+      response = {
+        ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "TransactionCommitInProgressException":
+    case "com.amazonaws.lakeformation#TransactionCommitInProgressException":
+      response = {
+        ...(await deserializeAws_restJson1TransactionCommitInProgressExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "TransactionCommittedException":
+    case "com.amazonaws.lakeformation#TransactionCommittedException":
+      response = {
+        ...(await deserializeAws_restJson1TransactionCommittedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CommitTransactionCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CommitTransactionCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CommitTransactionCommandError(output, context);
+  }
+  const contents: CommitTransactionCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    TransactionStatus: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.TransactionStatus !== undefined && data.TransactionStatus !== null) {
+    contents.TransactionStatus = __expectString(data.TransactionStatus);
+  }
+  return Promise.resolve(contents);
+};
+
+const
deserializeAws_restJson1CommitTransactionCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConcurrentModificationException": + case "com.amazonaws.lakeformation#ConcurrentModificationException": + response = { + ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCanceledException": + case "com.amazonaws.lakeformation#TransactionCanceledException": + response = { + ...(await deserializeAws_restJson1TransactionCanceledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateDataCellsFilterCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateDataCellsFilterCommandError(output, context); + } + const contents: CreateDataCellsFilterCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateDataCellsFilterCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, 
parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "AlreadyExistsException": + case "com.amazonaws.lakeformation#AlreadyExistsException": + response = { + ...(await deserializeAws_restJson1AlreadyExistsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNumberLimitExceededException": + case "com.amazonaws.lakeformation#ResourceNumberLimitExceededException": + response = { + ...(await deserializeAws_restJson1ResourceNumberLimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateLFTagCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateLFTagCommandError(output, context); + } + const contents: CreateLFTagCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateLFTagCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await 
deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNumberLimitExceededException": + case "com.amazonaws.lakeformation#ResourceNumberLimitExceededException": + response = { + ...(await deserializeAws_restJson1ResourceNumberLimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteDataCellsFilterCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteDataCellsFilterCommandError(output, context); + } + const contents: DeleteDataCellsFilterCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteDataCellsFilterCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteLFTagCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteLFTagCommandError(output, context); + } + const contents: DeleteLFTagCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteLFTagCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await 
deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteObjectsOnCancelCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteObjectsOnCancelCommandError(output, context); + } + const contents: DeleteObjectsOnCancelCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteObjectsOnCancelCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConcurrentModificationException": + case "com.amazonaws.lakeformation#ConcurrentModificationException": + response = { + ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotReadyException": + case "com.amazonaws.lakeformation#ResourceNotReadyException": + response = { + ...(await deserializeAws_restJson1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCanceledException": + case "com.amazonaws.lakeformation#TransactionCanceledException": + response = { + ...(await deserializeAws_restJson1TransactionCanceledExceptionResponse(parsedOutput, context)), + name: errorCode, + 
$metadata: deserializeMetadata(output), + }; + break; + case "TransactionCommittedException": + case "com.amazonaws.lakeformation#TransactionCommittedException": + response = { + ...(await deserializeAws_restJson1TransactionCommittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeregisterResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeregisterResourceCommandError(output, context); + } + const contents: DeregisterResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeregisterResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DescribeResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => 
{ + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DescribeResourceCommandError(output, context); + } + const contents: DescribeResourceCommandOutput = { + $metadata: deserializeMetadata(output), + ResourceInfo: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.ResourceInfo !== undefined && data.ResourceInfo !== null) { + contents.ResourceInfo = deserializeAws_restJson1ResourceInfo(data.ResourceInfo, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DescribeResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DescribeTransactionCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DescribeTransactionCommandError(output, context); + } + const contents: DescribeTransactionCommandOutput = { + $metadata: deserializeMetadata(output), + TransactionDescription: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.TransactionDescription !== undefined && data.TransactionDescription !== null) { + contents.TransactionDescription = deserializeAws_restJson1TransactionDescription( + data.TransactionDescription, + context + ); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DescribeTransactionCommandError 
= async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ExtendTransactionCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ExtendTransactionCommandError(output, context); + } + const contents: ExtendTransactionCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ExtendTransactionCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case 
"com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCanceledException": + case "com.amazonaws.lakeformation#TransactionCanceledException": + response = { + ...(await deserializeAws_restJson1TransactionCanceledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCommitInProgressException": + case "com.amazonaws.lakeformation#TransactionCommitInProgressException": + response = { + ...(await deserializeAws_restJson1TransactionCommitInProgressExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCommittedException": + case "com.amazonaws.lakeformation#TransactionCommittedException": + response = { + ...(await deserializeAws_restJson1TransactionCommittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetDataLakeSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetDataLakeSettingsCommandError(output, context); + } + const contents: GetDataLakeSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + DataLakeSettings: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.DataLakeSettings !== undefined && data.DataLakeSettings !== null) { + contents.DataLakeSettings = deserializeAws_restJson1DataLakeSettings(data.DataLakeSettings, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetDataLakeSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + 
...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetEffectivePermissionsForPathCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetEffectivePermissionsForPathCommandError(output, context); + } + const contents: GetEffectivePermissionsForPathCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + Permissions: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + if (data.Permissions !== undefined && data.Permissions !== null) { + contents.Permissions = deserializeAws_restJson1PrincipalResourcePermissionsList(data.Permissions, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetEffectivePermissionsForPathCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || 
parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetLFTagCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetLFTagCommandError(output, context); + } + const contents: GetLFTagCommandOutput = { + $metadata: deserializeMetadata(output), + CatalogId: undefined, + TagKey: undefined, + TagValues: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.CatalogId !== undefined && data.CatalogId !== null) { + contents.CatalogId = __expectString(data.CatalogId); + } + if (data.TagKey !== undefined && data.TagKey !== null) { + contents.TagKey = __expectString(data.TagKey); + } + if (data.TagValues !== undefined && data.TagValues !== null) { + contents.TagValues = deserializeAws_restJson1TagValueList(data.TagValues, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetLFTagCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + 
const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetQueryStateCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetQueryStateCommandError(output, context); + } + const contents: GetQueryStateCommandOutput = { + $metadata: deserializeMetadata(output), + Error: undefined, + State: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Error !== undefined && data.Error !== null) { + contents.Error = __expectString(data.Error); + } + if (data.State !== undefined && data.State !== null) { + contents.State = __expectString(data.State); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetQueryStateCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetQueryStatisticsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetQueryStatisticsCommandError(output, context); + } + const contents: GetQueryStatisticsCommandOutput = { + $metadata: deserializeMetadata(output), + ExecutionStatistics: undefined, + PlanningStatistics: undefined, + QuerySubmissionTime: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.ExecutionStatistics !== undefined && data.ExecutionStatistics !== null) { + contents.ExecutionStatistics = 
deserializeAws_restJson1ExecutionStatistics(data.ExecutionStatistics, context); + } + if (data.PlanningStatistics !== undefined && data.PlanningStatistics !== null) { + contents.PlanningStatistics = deserializeAws_restJson1PlanningStatistics(data.PlanningStatistics, context); + } + if (data.QuerySubmissionTime !== undefined && data.QuerySubmissionTime !== null) { + contents.QuerySubmissionTime = __expectNonNull(__parseRfc3339DateTime(data.QuerySubmissionTime)); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetQueryStatisticsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ExpiredException": + case "com.amazonaws.lakeformation#ExpiredException": + response = { + ...(await deserializeAws_restJson1ExpiredExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "StatisticsNotReadyYetException": + case "com.amazonaws.lakeformation#StatisticsNotReadyYetException": + response = { + ...(await deserializeAws_restJson1StatisticsNotReadyYetExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottledException": + case "com.amazonaws.lakeformation#ThrottledException": + response = { + ...(await deserializeAws_restJson1ThrottledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetResourceLFTagsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetResourceLFTagsCommandError(output, context); + } + const contents: GetResourceLFTagsCommandOutput = { + $metadata: deserializeMetadata(output), + LFTagOnDatabase: undefined, + LFTagsOnColumns: 
undefined, + LFTagsOnTable: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.LFTagOnDatabase !== undefined && data.LFTagOnDatabase !== null) { + contents.LFTagOnDatabase = deserializeAws_restJson1LFTagsList(data.LFTagOnDatabase, context); + } + if (data.LFTagsOnColumns !== undefined && data.LFTagsOnColumns !== null) { + contents.LFTagsOnColumns = deserializeAws_restJson1ColumnLFTagsList(data.LFTagsOnColumns, context); + } + if (data.LFTagsOnTable !== undefined && data.LFTagsOnTable !== null) { + contents.LFTagsOnTable = deserializeAws_restJson1LFTagsList(data.LFTagsOnTable, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetResourceLFTagsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "GlueEncryptionException": + case "com.amazonaws.lakeformation#GlueEncryptionException": + response = { + ...(await deserializeAws_restJson1GlueEncryptionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetTableObjectsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 
300) { + return deserializeAws_restJson1GetTableObjectsCommandError(output, context); + } + const contents: GetTableObjectsCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + Objects: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + if (data.Objects !== undefined && data.Objects !== null) { + contents.Objects = deserializeAws_restJson1PartitionedTableObjectsList(data.Objects, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetTableObjectsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotReadyException": + case "com.amazonaws.lakeformation#ResourceNotReadyException": + response = { + ...(await deserializeAws_restJson1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCanceledException": + case "com.amazonaws.lakeformation#TransactionCanceledException": + response = { + ...(await deserializeAws_restJson1TransactionCanceledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCommittedException": + case "com.amazonaws.lakeformation#TransactionCommittedException": + response = { + ...(await deserializeAws_restJson1TransactionCommittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + 
return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetWorkUnitResultsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetWorkUnitResultsCommandError(output, context); + } + const contents: GetWorkUnitResultsCommandOutput = { + $metadata: deserializeMetadata(output), + ResultStream: undefined, + }; + const data: any = output.body; + contents.ResultStream = data; + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetWorkUnitResultsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ExpiredException": + case "com.amazonaws.lakeformation#ExpiredException": + response = { + ...(await deserializeAws_restJson1ExpiredExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottledException": + case "com.amazonaws.lakeformation#ThrottledException": + response = { + ...(await deserializeAws_restJson1ThrottledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetWorkUnitsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetWorkUnitsCommandError(output, context); + } + const contents: GetWorkUnitsCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + QueryId: undefined, + WorkUnitRanges: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + 
contents.NextToken = __expectString(data.NextToken); + } + if (data.QueryId !== undefined && data.QueryId !== null) { + contents.QueryId = __expectString(data.QueryId); + } + if (data.WorkUnitRanges !== undefined && data.WorkUnitRanges !== null) { + contents.WorkUnitRanges = deserializeAws_restJson1WorkUnitRangeList(data.WorkUnitRanges, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetWorkUnitsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ExpiredException": + case "com.amazonaws.lakeformation#ExpiredException": + response = { + ...(await deserializeAws_restJson1ExpiredExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "WorkUnitsNotReadyYetException": + case "com.amazonaws.lakeformation#WorkUnitsNotReadyYetException": + response = { + ...(await deserializeAws_restJson1WorkUnitsNotReadyYetExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GrantPermissionsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GrantPermissionsCommandError(output, context); + } + const contents: GrantPermissionsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GrantPermissionsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode 
= loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConcurrentModificationException": + case "com.amazonaws.lakeformation#ConcurrentModificationException": + response = { + ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListDataCellsFilterCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListDataCellsFilterCommandError(output, context); + } + const contents: ListDataCellsFilterCommandOutput = { + $metadata: deserializeMetadata(output), + DataCellsFilters: undefined, + NextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.DataCellsFilters !== undefined && data.DataCellsFilters !== null) { + contents.DataCellsFilters = deserializeAws_restJson1DataCellsFilterList(data.DataCellsFilters, context); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListDataCellsFilterCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, 
context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListLFTagsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListLFTagsCommandError(output, context); + } + const contents: ListLFTagsCommandOutput = { + $metadata: deserializeMetadata(output), + LFTags: undefined, + NextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.LFTags !== undefined && data.LFTags !== null) { + contents.LFTags = deserializeAws_restJson1LFTagsList(data.LFTags, context); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListLFTagsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + 
default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListPermissionsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListPermissionsCommandError(output, context); + } + const contents: ListPermissionsCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + PrincipalResourcePermissions: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + if (data.PrincipalResourcePermissions !== undefined && data.PrincipalResourcePermissions !== null) { + contents.PrincipalResourcePermissions = deserializeAws_restJson1PrincipalResourcePermissionsList( + data.PrincipalResourcePermissions, + context + ); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListPermissionsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListResourcesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return 
deserializeAws_restJson1ListResourcesCommandError(output, context); + } + const contents: ListResourcesCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + ResourceInfoList: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + if (data.ResourceInfoList !== undefined && data.ResourceInfoList !== null) { + contents.ResourceInfoList = deserializeAws_restJson1ResourceInfoList(data.ResourceInfoList, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListResourcesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTableStorageOptimizersCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTableStorageOptimizersCommandError(output, context); + } + const contents: ListTableStorageOptimizersCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + StorageOptimizerList: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + if (data.StorageOptimizerList !== undefined && data.StorageOptimizerList !== null) { + contents.StorageOptimizerList = deserializeAws_restJson1StorageOptimizerList(data.StorageOptimizerList, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTableStorageOptimizersCommandError = async ( + output: 
__HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTransactionsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTransactionsCommandError(output, context); + } + const contents: ListTransactionsCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + Transactions: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + if (data.Transactions !== undefined && data.Transactions !== null) { + contents.Transactions = deserializeAws_restJson1TransactionDescriptionList(data.Transactions, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTransactionsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1PutDataLakeSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1PutDataLakeSettingsCommandError(output, context); + } + const contents: PutDataLakeSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1PutDataLakeSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1RegisterResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1RegisterResourceCommandError(output, context); + } + const contents: RegisterResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return 
Promise.resolve(contents); +}; + +const deserializeAws_restJson1RegisterResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "AlreadyExistsException": + case "com.amazonaws.lakeformation#AlreadyExistsException": + response = { + ...(await deserializeAws_restJson1AlreadyExistsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNumberLimitExceededException": + case "com.amazonaws.lakeformation#ResourceNumberLimitExceededException": + response = { + ...(await deserializeAws_restJson1ResourceNumberLimitExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1RemoveLFTagsFromResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1RemoveLFTagsFromResourceCommandError(output, context); + } + const contents: RemoveLFTagsFromResourceCommandOutput = { + $metadata: deserializeMetadata(output), + Failures: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Failures !== 
undefined && data.Failures !== null) { + contents.Failures = deserializeAws_restJson1LFTagErrors(data.Failures, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1RemoveLFTagsFromResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConcurrentModificationException": + case "com.amazonaws.lakeformation#ConcurrentModificationException": + response = { + ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "GlueEncryptionException": + case "com.amazonaws.lakeformation#GlueEncryptionException": + response = { + ...(await deserializeAws_restJson1GlueEncryptionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1RevokePermissionsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1RevokePermissionsCommandError(output, context); + } + const contents: RevokePermissionsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return 
Promise.resolve(contents); +}; + +const deserializeAws_restJson1RevokePermissionsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConcurrentModificationException": + case "com.amazonaws.lakeformation#ConcurrentModificationException": + response = { + ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1SearchDatabasesByLFTagsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1SearchDatabasesByLFTagsCommandError(output, context); + } + const contents: SearchDatabasesByLFTagsCommandOutput = { + $metadata: deserializeMetadata(output), + DatabaseList: undefined, + NextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.DatabaseList !== undefined && data.DatabaseList !== null) { + contents.DatabaseList = deserializeAws_restJson1DatabaseLFTagsList(data.DatabaseList, context); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1SearchDatabasesByLFTagsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case 
"com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "GlueEncryptionException": + case "com.amazonaws.lakeformation#GlueEncryptionException": + response = { + ...(await deserializeAws_restJson1GlueEncryptionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1SearchTablesByLFTagsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1SearchTablesByLFTagsCommandError(output, context); + } + const contents: SearchTablesByLFTagsCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + TableList: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + if (data.TableList !== undefined && data.TableList !== null) { + contents.TableList = deserializeAws_restJson1TableLFTagsList(data.TableList, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1SearchTablesByLFTagsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case 
"com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "GlueEncryptionException": + case "com.amazonaws.lakeformation#GlueEncryptionException": + response = { + ...(await deserializeAws_restJson1GlueEncryptionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1StartQueryPlanningCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1StartQueryPlanningCommandError(output, context); + } + const contents: StartQueryPlanningCommandOutput = { + $metadata: deserializeMetadata(output), + QueryId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.QueryId !== undefined && data.QueryId !== null) { + contents.QueryId = __expectString(data.QueryId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1StartQueryPlanningCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + 
break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottledException": + case "com.amazonaws.lakeformation#ThrottledException": + response = { + ...(await deserializeAws_restJson1ThrottledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1StartTransactionCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1StartTransactionCommandError(output, context); + } + const contents: StartTransactionCommandOutput = { + $metadata: deserializeMetadata(output), + TransactionId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.TransactionId !== undefined && data.TransactionId !== null) { + contents.TransactionId = __expectString(data.TransactionId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1StartTransactionCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateLFTagCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return 
deserializeAws_restJson1UpdateLFTagCommandError(output, context); + } + const contents: UpdateLFTagCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateLFTagCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConcurrentModificationException": + case "com.amazonaws.lakeformation#ConcurrentModificationException": + response = { + ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateResourceCommandError(output, context); + } + const contents: UpdateResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await 
parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateTableObjectsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateTableObjectsCommandError(output, context); + } + const contents: UpdateTableObjectsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateTableObjectsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "ConcurrentModificationException": + case "com.amazonaws.lakeformation#ConcurrentModificationException": + response = { + ...(await deserializeAws_restJson1ConcurrentModificationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await 
deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "OperationTimeoutException": + case "com.amazonaws.lakeformation#OperationTimeoutException": + response = { + ...(await deserializeAws_restJson1OperationTimeoutExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotReadyException": + case "com.amazonaws.lakeformation#ResourceNotReadyException": + response = { + ...(await deserializeAws_restJson1ResourceNotReadyExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCanceledException": + case "com.amazonaws.lakeformation#TransactionCanceledException": + response = { + ...(await deserializeAws_restJson1TransactionCanceledExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCommitInProgressException": + case "com.amazonaws.lakeformation#TransactionCommitInProgressException": + response = { + ...(await deserializeAws_restJson1TransactionCommitInProgressExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TransactionCommittedException": + case "com.amazonaws.lakeformation#TransactionCommittedException": + response = { + ...(await deserializeAws_restJson1TransactionCommittedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateTableStorageOptimizerCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateTableStorageOptimizerCommandError(output, context); + } + const contents: UpdateTableStorageOptimizerCommandOutput = { + $metadata: deserializeMetadata(output), + Result: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Result !== undefined && data.Result !== null) { + contents.Result = __expectString(data.Result); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateTableStorageOptimizerCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, 
parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.lakeformation#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "EntityNotFoundException": + case "com.amazonaws.lakeformation#EntityNotFoundException": + response = { + ...(await deserializeAws_restJson1EntityNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServiceException": + case "com.amazonaws.lakeformation#InternalServiceException": + response = { + ...(await deserializeAws_restJson1InternalServiceExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InvalidInputException": + case "com.amazonaws.lakeformation#InvalidInputException": + response = { + ...(await deserializeAws_restJson1InvalidInputExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: AccessDeniedException = { + name: "AccessDeniedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1AlreadyExistsExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: AlreadyExistsException = { + name: "AlreadyExistsException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1ConcurrentModificationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ConcurrentModificationException = { + name: "ConcurrentModificationException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1EntityNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: EntityNotFoundException = { + name: "EntityNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && 
data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1ExpiredExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ExpiredException = { + name: "ExpiredException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1GlueEncryptionExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: GlueEncryptionException = { + name: "GlueEncryptionException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1InternalServiceExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: InternalServiceException = { + name: "InternalServiceException", + $fault: "server", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1InvalidInputExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: InvalidInputException = { + name: "InvalidInputException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1OperationTimeoutExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: OperationTimeoutException = { + name: "OperationTimeoutException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1ResourceNotReadyExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ResourceNotReadyException = { + name: "ResourceNotReadyException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1ResourceNumberLimitExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ResourceNumberLimitExceededException = { + name: "ResourceNumberLimitExceededException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); 
+ } + return contents; +}; + +const deserializeAws_restJson1StatisticsNotReadyYetExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: StatisticsNotReadyYetException = { + name: "StatisticsNotReadyYetException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1ThrottledExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ThrottledException = { + name: "ThrottledException", + $fault: "client", + $retryable: { + throttling: true, + }, + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1TransactionCanceledExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: TransactionCanceledException = { + name: "TransactionCanceledException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1TransactionCommitInProgressExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: TransactionCommitInProgressException = { + name: "TransactionCommitInProgressException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1TransactionCommittedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: TransactionCommittedException = { + name: "TransactionCommittedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1WorkUnitsNotReadyYetExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: WorkUnitsNotReadyYetException = { + name: "WorkUnitsNotReadyYetException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const serializeAws_restJson1AddObjectInput = (input: AddObjectInput, context: __SerdeContext): any => { + return { + ...(input.ETag !== undefined && input.ETag !== null && { ETag: input.ETag }), + ...(input.PartitionValues !== undefined && + input.PartitionValues !== null && { + PartitionValues: serializeAws_restJson1PartitionValuesList(input.PartitionValues, context), + }), + ...(input.Size !== undefined && input.Size !== 
null && { Size: input.Size }), + ...(input.Uri !== undefined && input.Uri !== null && { Uri: input.Uri }), + }; +}; + +const serializeAws_restJson1AllRowsWildcard = (input: AllRowsWildcard, context: __SerdeContext): any => { + return {}; +}; + +const serializeAws_restJson1BatchPermissionsRequestEntry = ( + input: BatchPermissionsRequestEntry, + context: __SerdeContext +): any => { + return { + ...(input.Id !== undefined && input.Id !== null && { Id: input.Id }), + ...(input.Permissions !== undefined && + input.Permissions !== null && { Permissions: serializeAws_restJson1PermissionList(input.Permissions, context) }), + ...(input.PermissionsWithGrantOption !== undefined && + input.PermissionsWithGrantOption !== null && { + PermissionsWithGrantOption: serializeAws_restJson1PermissionList(input.PermissionsWithGrantOption, context), + }), + ...(input.Principal !== undefined && + input.Principal !== null && { Principal: serializeAws_restJson1DataLakePrincipal(input.Principal, context) }), + ...(input.Resource !== undefined && + input.Resource !== null && { Resource: serializeAws_restJson1Resource(input.Resource, context) }), + }; +}; + +const serializeAws_restJson1BatchPermissionsRequestEntryList = ( + input: BatchPermissionsRequestEntry[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1BatchPermissionsRequestEntry(entry, context); + }); +}; + +const serializeAws_restJson1CatalogResource = (input: CatalogResource, context: __SerdeContext): any => { + return {}; +}; + +const serializeAws_restJson1ColumnNames = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1ColumnWildcard = (input: ColumnWildcard, context: __SerdeContext): any => { + return { + ...(input.ExcludedColumnNames !== undefined && + input.ExcludedColumnNames !== null && { + ExcludedColumnNames: serializeAws_restJson1ColumnNames(input.ExcludedColumnNames, context), + }), + }; +}; + +const serializeAws_restJson1DatabaseResource = (input: DatabaseResource, context: __SerdeContext): any => { + return { + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + }; +}; + +const serializeAws_restJson1DataCellsFilter = (input: DataCellsFilter, context: __SerdeContext): any => { + return { + ...(input.ColumnNames !== undefined && + input.ColumnNames !== null && { ColumnNames: serializeAws_restJson1ColumnNames(input.ColumnNames, context) }), + ...(input.ColumnWildcard !== undefined && + input.ColumnWildcard !== null && { + ColumnWildcard: serializeAws_restJson1ColumnWildcard(input.ColumnWildcard, context), + }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.RowFilter !== undefined && + input.RowFilter !== null && { RowFilter: serializeAws_restJson1RowFilter(input.RowFilter, context) }), + ...(input.TableCatalogId !== undefined && + input.TableCatalogId !== null && { TableCatalogId: input.TableCatalogId }), + ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + }; +}; + +const 
serializeAws_restJson1DataCellsFilterResource = ( + input: DataCellsFilterResource, + context: __SerdeContext +): any => { + return { + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.TableCatalogId !== undefined && + input.TableCatalogId !== null && { TableCatalogId: input.TableCatalogId }), + ...(input.TableName !== undefined && input.TableName !== null && { TableName: input.TableName }), + }; +}; + +const serializeAws_restJson1DataLakePrincipal = (input: DataLakePrincipal, context: __SerdeContext): any => { + return { + ...(input.DataLakePrincipalIdentifier !== undefined && + input.DataLakePrincipalIdentifier !== null && { DataLakePrincipalIdentifier: input.DataLakePrincipalIdentifier }), + }; +}; + +const serializeAws_restJson1DataLakePrincipalList = (input: DataLakePrincipal[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1DataLakePrincipal(entry, context); + }); +}; + +const serializeAws_restJson1DataLakeSettings = (input: DataLakeSettings, context: __SerdeContext): any => { + return { + ...(input.CreateDatabaseDefaultPermissions !== undefined && + input.CreateDatabaseDefaultPermissions !== null && { + CreateDatabaseDefaultPermissions: serializeAws_restJson1PrincipalPermissionsList( + input.CreateDatabaseDefaultPermissions, + context + ), + }), + ...(input.CreateTableDefaultPermissions !== undefined && + input.CreateTableDefaultPermissions !== null && { + CreateTableDefaultPermissions: serializeAws_restJson1PrincipalPermissionsList( + input.CreateTableDefaultPermissions, + context + ), + }), + ...(input.DataLakeAdmins !== undefined && + input.DataLakeAdmins !== null && { + DataLakeAdmins: serializeAws_restJson1DataLakePrincipalList(input.DataLakeAdmins, context), + }), + ...(input.TrustedResourceOwners !== undefined && + input.TrustedResourceOwners !== null && { + TrustedResourceOwners: serializeAws_restJson1TrustedResourceOwners(input.TrustedResourceOwners, context), + }), + }; +}; + +const serializeAws_restJson1DataLocationResource = (input: DataLocationResource, context: __SerdeContext): any => { + return { + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.ResourceArn !== undefined && input.ResourceArn !== null && { ResourceArn: input.ResourceArn }), + }; +}; + +const serializeAws_restJson1DeleteObjectInput = (input: DeleteObjectInput, context: __SerdeContext): any => { + return { + ...(input.ETag !== undefined && input.ETag !== null && { ETag: input.ETag }), + ...(input.PartitionValues !== undefined && + input.PartitionValues !== null && { + PartitionValues: serializeAws_restJson1PartitionValuesList(input.PartitionValues, context), + }), + ...(input.Uri !== undefined && input.Uri !== null && { Uri: input.Uri }), + }; +}; + +const serializeAws_restJson1Expression = (input: LFTag[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1LFTag(entry, context); + }); +}; + +const serializeAws_restJson1FilterCondition = (input: FilterCondition, context: __SerdeContext): any => { + return { + ...(input.ComparisonOperator !== undefined && + input.ComparisonOperator !== null && { ComparisonOperator: 
input.ComparisonOperator }), + ...(input.Field !== undefined && input.Field !== null && { Field: input.Field }), + ...(input.StringValueList !== undefined && + input.StringValueList !== null && { + StringValueList: serializeAws_restJson1StringValueList(input.StringValueList, context), + }), + }; +}; + +const serializeAws_restJson1FilterConditionList = (input: FilterCondition[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1FilterCondition(entry, context); + }); +}; + +const serializeAws_restJson1LFTag = (input: LFTag, context: __SerdeContext): any => { + return { + ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), + ...(input.TagValues !== undefined && + input.TagValues !== null && { TagValues: serializeAws_restJson1TagValueList(input.TagValues, context) }), + }; +}; + +const serializeAws_restJson1LFTagKeyResource = (input: LFTagKeyResource, context: __SerdeContext): any => { + return { + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), + ...(input.TagValues !== undefined && + input.TagValues !== null && { TagValues: serializeAws_restJson1TagValueList(input.TagValues, context) }), + }; +}; + +const serializeAws_restJson1LFTagPair = (input: LFTagPair, context: __SerdeContext): any => { + return { + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.TagKey !== undefined && input.TagKey !== null && { TagKey: input.TagKey }), + ...(input.TagValues !== undefined && + input.TagValues !== null && { TagValues: serializeAws_restJson1TagValueList(input.TagValues, context) }), + }; +}; + +const serializeAws_restJson1LFTagPolicyResource = (input: LFTagPolicyResource, context: __SerdeContext): any => { + return { + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.Expression !== undefined && + input.Expression !== null && { Expression: serializeAws_restJson1Expression(input.Expression, context) }), + ...(input.ResourceType !== undefined && input.ResourceType !== null && { ResourceType: input.ResourceType }), + }; +}; + +const serializeAws_restJson1LFTagsList = (input: LFTagPair[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1LFTagPair(entry, context); + }); +}; + +const serializeAws_restJson1PartitionValuesList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1PermissionList = (input: (Permission | string)[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1PrincipalPermissions = (input: PrincipalPermissions, context: __SerdeContext): any => { + return { + ...(input.Permissions !== undefined && + input.Permissions !== null && { Permissions: serializeAws_restJson1PermissionList(input.Permissions, context) }), + ...(input.Principal !== undefined && + input.Principal !== null && { Principal: 
serializeAws_restJson1DataLakePrincipal(input.Principal, context) }), + }; +}; + +const serializeAws_restJson1PrincipalPermissionsList = ( + input: PrincipalPermissions[], + context: __SerdeContext +): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1PrincipalPermissions(entry, context); + }); +}; + +const serializeAws_restJson1QueryParameterMap = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1QueryPlanningContext = (input: QueryPlanningContext, context: __SerdeContext): any => { + return { + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.QueryAsOfTime !== undefined && + input.QueryAsOfTime !== null && { QueryAsOfTime: Math.round(input.QueryAsOfTime.getTime() / 1000) }), + ...(input.QueryParameters !== undefined && + input.QueryParameters !== null && { + QueryParameters: serializeAws_restJson1QueryParameterMap(input.QueryParameters, context), + }), + ...(input.TransactionId !== undefined && input.TransactionId !== null && { TransactionId: input.TransactionId }), + }; +}; + +const serializeAws_restJson1Resource = (input: Resource, context: __SerdeContext): any => { + return { + ...(input.Catalog !== undefined && + input.Catalog !== null && { Catalog: serializeAws_restJson1CatalogResource(input.Catalog, context) }), + ...(input.DataCellsFilter !== undefined && + input.DataCellsFilter !== null && { + DataCellsFilter: serializeAws_restJson1DataCellsFilterResource(input.DataCellsFilter, context), + }), + ...(input.DataLocation !== undefined && + input.DataLocation !== null && { + DataLocation: serializeAws_restJson1DataLocationResource(input.DataLocation, context), + }), + ...(input.Database !== undefined && + input.Database !== null && { Database: serializeAws_restJson1DatabaseResource(input.Database, context) }), + ...(input.LFTag !== undefined && + input.LFTag !== null && { LFTag: serializeAws_restJson1LFTagKeyResource(input.LFTag, context) }), + ...(input.LFTagPolicy !== undefined && + input.LFTagPolicy !== null && { + LFTagPolicy: serializeAws_restJson1LFTagPolicyResource(input.LFTagPolicy, context), + }), + ...(input.Table !== undefined && + input.Table !== null && { Table: serializeAws_restJson1TableResource(input.Table, context) }), + ...(input.TableWithColumns !== undefined && + input.TableWithColumns !== null && { + TableWithColumns: serializeAws_restJson1TableWithColumnsResource(input.TableWithColumns, context), + }), + }; +}; + +const serializeAws_restJson1RowFilter = (input: RowFilter, context: __SerdeContext): any => { + return { + ...(input.AllRowsWildcard !== undefined && + input.AllRowsWildcard !== null && { + AllRowsWildcard: serializeAws_restJson1AllRowsWildcard(input.AllRowsWildcard, context), + }), + ...(input.FilterExpression !== undefined && + input.FilterExpression !== null && { FilterExpression: input.FilterExpression }), + }; +}; + +const serializeAws_restJson1StorageOptimizerConfig = ( + input: { [key: string]: string }, + context: __SerdeContext +): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: 
[string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1StorageOptimizerConfigMap = ( + input: { [key: string]: { [key: string]: string } }, + context: __SerdeContext +): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [OptimizerType | string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: serializeAws_restJson1StorageOptimizerConfig(value, context), + }; + }, {}); +}; + +const serializeAws_restJson1StringValueList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1TableResource = (input: TableResource, context: __SerdeContext): any => { + return { + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.TableWildcard !== undefined && + input.TableWildcard !== null && { + TableWildcard: serializeAws_restJson1TableWildcard(input.TableWildcard, context), + }), + }; +}; + +const serializeAws_restJson1TableWildcard = (input: TableWildcard, context: __SerdeContext): any => { + return {}; +}; + +const serializeAws_restJson1TableWithColumnsResource = ( + input: TableWithColumnsResource, + context: __SerdeContext +): any => { + return { + ...(input.CatalogId !== undefined && input.CatalogId !== null && { CatalogId: input.CatalogId }), + ...(input.ColumnNames !== undefined && + input.ColumnNames !== null && { ColumnNames: serializeAws_restJson1ColumnNames(input.ColumnNames, context) }), + ...(input.ColumnWildcard !== undefined && + input.ColumnWildcard !== null && { + ColumnWildcard: serializeAws_restJson1ColumnWildcard(input.ColumnWildcard, context), + }), + ...(input.DatabaseName !== undefined && input.DatabaseName !== null && { DatabaseName: input.DatabaseName }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + }; +}; + +const serializeAws_restJson1TagValueList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1TrustedResourceOwners = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1VirtualObject = (input: VirtualObject, context: __SerdeContext): any => { + return { + ...(input.ETag !== undefined && input.ETag !== null && { ETag: input.ETag }), + ...(input.Uri !== undefined && input.Uri !== null && { Uri: input.Uri }), + }; +}; + +const serializeAws_restJson1VirtualObjectList = (input: VirtualObject[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1VirtualObject(entry, context); + }); +}; + +const serializeAws_restJson1WriteOperation = (input: WriteOperation, context: __SerdeContext): any => { + return { + ...(input.AddObject !== undefined && + input.AddObject !== null && { AddObject: 
serializeAws_restJson1AddObjectInput(input.AddObject, context) }), + ...(input.DeleteObject !== undefined && + input.DeleteObject !== null && { + DeleteObject: serializeAws_restJson1DeleteObjectInput(input.DeleteObject, context), + }), + }; +}; + +const serializeAws_restJson1WriteOperationList = (input: WriteOperation[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1WriteOperation(entry, context); + }); +}; + +const deserializeAws_restJson1AllRowsWildcard = (output: any, context: __SerdeContext): AllRowsWildcard => { + return {} as any; +}; + +const deserializeAws_restJson1BatchPermissionsFailureEntry = ( + output: any, + context: __SerdeContext +): BatchPermissionsFailureEntry => { + return { + Error: + output.Error !== undefined && output.Error !== null + ? deserializeAws_restJson1ErrorDetail(output.Error, context) + : undefined, + RequestEntry: + output.RequestEntry !== undefined && output.RequestEntry !== null + ? deserializeAws_restJson1BatchPermissionsRequestEntry(output.RequestEntry, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1BatchPermissionsFailureList = ( + output: any, + context: __SerdeContext +): BatchPermissionsFailureEntry[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1BatchPermissionsFailureEntry(entry, context); + }); +}; + +const deserializeAws_restJson1BatchPermissionsRequestEntry = ( + output: any, + context: __SerdeContext +): BatchPermissionsRequestEntry => { + return { + Id: __expectString(output.Id), + Permissions: + output.Permissions !== undefined && output.Permissions !== null + ? deserializeAws_restJson1PermissionList(output.Permissions, context) + : undefined, + PermissionsWithGrantOption: + output.PermissionsWithGrantOption !== undefined && output.PermissionsWithGrantOption !== null + ? deserializeAws_restJson1PermissionList(output.PermissionsWithGrantOption, context) + : undefined, + Principal: + output.Principal !== undefined && output.Principal !== null + ? deserializeAws_restJson1DataLakePrincipal(output.Principal, context) + : undefined, + Resource: + output.Resource !== undefined && output.Resource !== null + ? deserializeAws_restJson1Resource(output.Resource, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1CatalogResource = (output: any, context: __SerdeContext): CatalogResource => { + return {} as any; +}; + +const deserializeAws_restJson1ColumnLFTag = (output: any, context: __SerdeContext): ColumnLFTag => { + return { + LFTags: + output.LFTags !== undefined && output.LFTags !== null + ? 
deserializeAws_restJson1LFTagsList(output.LFTags, context) + : undefined, + Name: __expectString(output.Name), + } as any; +}; + +const deserializeAws_restJson1ColumnLFTagsList = (output: any, context: __SerdeContext): ColumnLFTag[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ColumnLFTag(entry, context); + }); +}; + +const deserializeAws_restJson1ColumnNames = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1ColumnWildcard = (output: any, context: __SerdeContext): ColumnWildcard => { + return { + ExcludedColumnNames: + output.ExcludedColumnNames !== undefined && output.ExcludedColumnNames !== null + ? deserializeAws_restJson1ColumnNames(output.ExcludedColumnNames, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1DatabaseLFTagsList = (output: any, context: __SerdeContext): TaggedDatabase[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1TaggedDatabase(entry, context); + }); +}; + +const deserializeAws_restJson1DatabaseResource = (output: any, context: __SerdeContext): DatabaseResource => { + return { + CatalogId: __expectString(output.CatalogId), + Name: __expectString(output.Name), + } as any; +}; + +const deserializeAws_restJson1DataCellsFilter = (output: any, context: __SerdeContext): DataCellsFilter => { + return { + ColumnNames: + output.ColumnNames !== undefined && output.ColumnNames !== null + ? deserializeAws_restJson1ColumnNames(output.ColumnNames, context) + : undefined, + ColumnWildcard: + output.ColumnWildcard !== undefined && output.ColumnWildcard !== null + ? deserializeAws_restJson1ColumnWildcard(output.ColumnWildcard, context) + : undefined, + DatabaseName: __expectString(output.DatabaseName), + Name: __expectString(output.Name), + RowFilter: + output.RowFilter !== undefined && output.RowFilter !== null + ? 
deserializeAws_restJson1RowFilter(output.RowFilter, context) + : undefined, + TableCatalogId: __expectString(output.TableCatalogId), + TableName: __expectString(output.TableName), + } as any; +}; + +const deserializeAws_restJson1DataCellsFilterList = (output: any, context: __SerdeContext): DataCellsFilter[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1DataCellsFilter(entry, context); + }); +}; + +const deserializeAws_restJson1DataCellsFilterResource = ( + output: any, + context: __SerdeContext +): DataCellsFilterResource => { + return { + DatabaseName: __expectString(output.DatabaseName), + Name: __expectString(output.Name), + TableCatalogId: __expectString(output.TableCatalogId), + TableName: __expectString(output.TableName), + } as any; +}; + +const deserializeAws_restJson1DataLakePrincipal = (output: any, context: __SerdeContext): DataLakePrincipal => { + return { + DataLakePrincipalIdentifier: __expectString(output.DataLakePrincipalIdentifier), + } as any; +}; + +const deserializeAws_restJson1DataLakePrincipalList = (output: any, context: __SerdeContext): DataLakePrincipal[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1DataLakePrincipal(entry, context); + }); +}; + +const deserializeAws_restJson1DataLakeSettings = (output: any, context: __SerdeContext): DataLakeSettings => { + return { + CreateDatabaseDefaultPermissions: + output.CreateDatabaseDefaultPermissions !== undefined && output.CreateDatabaseDefaultPermissions !== null + ? deserializeAws_restJson1PrincipalPermissionsList(output.CreateDatabaseDefaultPermissions, context) + : undefined, + CreateTableDefaultPermissions: + output.CreateTableDefaultPermissions !== undefined && output.CreateTableDefaultPermissions !== null + ? deserializeAws_restJson1PrincipalPermissionsList(output.CreateTableDefaultPermissions, context) + : undefined, + DataLakeAdmins: + output.DataLakeAdmins !== undefined && output.DataLakeAdmins !== null + ? deserializeAws_restJson1DataLakePrincipalList(output.DataLakeAdmins, context) + : undefined, + TrustedResourceOwners: + output.TrustedResourceOwners !== undefined && output.TrustedResourceOwners !== null + ? deserializeAws_restJson1TrustedResourceOwners(output.TrustedResourceOwners, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1DataLocationResource = (output: any, context: __SerdeContext): DataLocationResource => { + return { + CatalogId: __expectString(output.CatalogId), + ResourceArn: __expectString(output.ResourceArn), + } as any; +}; + +const deserializeAws_restJson1DetailsMap = (output: any, context: __SerdeContext): DetailsMap => { + return { + ResourceShare: + output.ResourceShare !== undefined && output.ResourceShare !== null + ? 
deserializeAws_restJson1ResourceShareList(output.ResourceShare, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ErrorDetail = (output: any, context: __SerdeContext): ErrorDetail => { + return { + ErrorCode: __expectString(output.ErrorCode), + ErrorMessage: __expectString(output.ErrorMessage), + } as any; +}; + +const deserializeAws_restJson1ExecutionStatistics = (output: any, context: __SerdeContext): ExecutionStatistics => { + return { + AverageExecutionTimeMillis: __expectLong(output.AverageExecutionTimeMillis), + DataScannedBytes: __expectLong(output.DataScannedBytes), + WorkUnitsExecutedCount: __expectLong(output.WorkUnitsExecutedCount), + } as any; +}; + +const deserializeAws_restJson1Expression = (output: any, context: __SerdeContext): LFTag[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1LFTag(entry, context); + }); +}; + +const deserializeAws_restJson1LFTag = (output: any, context: __SerdeContext): LFTag => { + return { + TagKey: __expectString(output.TagKey), + TagValues: + output.TagValues !== undefined && output.TagValues !== null + ? deserializeAws_restJson1TagValueList(output.TagValues, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1LFTagError = (output: any, context: __SerdeContext): LFTagError => { + return { + Error: + output.Error !== undefined && output.Error !== null + ? deserializeAws_restJson1ErrorDetail(output.Error, context) + : undefined, + LFTag: + output.LFTag !== undefined && output.LFTag !== null + ? deserializeAws_restJson1LFTagPair(output.LFTag, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1LFTagErrors = (output: any, context: __SerdeContext): LFTagError[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1LFTagError(entry, context); + }); +}; + +const deserializeAws_restJson1LFTagKeyResource = (output: any, context: __SerdeContext): LFTagKeyResource => { + return { + CatalogId: __expectString(output.CatalogId), + TagKey: __expectString(output.TagKey), + TagValues: + output.TagValues !== undefined && output.TagValues !== null + ? deserializeAws_restJson1TagValueList(output.TagValues, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1LFTagPair = (output: any, context: __SerdeContext): LFTagPair => { + return { + CatalogId: __expectString(output.CatalogId), + TagKey: __expectString(output.TagKey), + TagValues: + output.TagValues !== undefined && output.TagValues !== null + ? deserializeAws_restJson1TagValueList(output.TagValues, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1LFTagPolicyResource = (output: any, context: __SerdeContext): LFTagPolicyResource => { + return { + CatalogId: __expectString(output.CatalogId), + Expression: + output.Expression !== undefined && output.Expression !== null + ? 
deserializeAws_restJson1Expression(output.Expression, context) + : undefined, + ResourceType: __expectString(output.ResourceType), + } as any; +}; + +const deserializeAws_restJson1LFTagsList = (output: any, context: __SerdeContext): LFTagPair[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1LFTagPair(entry, context); + }); +}; + +const deserializeAws_restJson1PartitionedTableObjectsList = ( + output: any, + context: __SerdeContext +): PartitionObjects[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PartitionObjects(entry, context); + }); +}; + +const deserializeAws_restJson1PartitionObjects = (output: any, context: __SerdeContext): PartitionObjects => { + return { + Objects: + output.Objects !== undefined && output.Objects !== null + ? deserializeAws_restJson1TableObjectList(output.Objects, context) + : undefined, + PartitionValues: + output.PartitionValues !== undefined && output.PartitionValues !== null + ? deserializeAws_restJson1PartitionValuesList(output.PartitionValues, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PartitionValuesList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1PermissionList = (output: any, context: __SerdeContext): (Permission | string)[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1PlanningStatistics = (output: any, context: __SerdeContext): PlanningStatistics => { + return { + EstimatedDataToScanBytes: __expectLong(output.EstimatedDataToScanBytes), + PlanningTimeMillis: __expectLong(output.PlanningTimeMillis), + QueueTimeMillis: __expectLong(output.QueueTimeMillis), + WorkUnitsGeneratedCount: __expectLong(output.WorkUnitsGeneratedCount), + } as any; +}; + +const deserializeAws_restJson1PrincipalPermissions = (output: any, context: __SerdeContext): PrincipalPermissions => { + return { + Permissions: + output.Permissions !== undefined && output.Permissions !== null + ? deserializeAws_restJson1PermissionList(output.Permissions, context) + : undefined, + Principal: + output.Principal !== undefined && output.Principal !== null + ? deserializeAws_restJson1DataLakePrincipal(output.Principal, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PrincipalPermissionsList = ( + output: any, + context: __SerdeContext +): PrincipalPermissions[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PrincipalPermissions(entry, context); + }); +}; + +const deserializeAws_restJson1PrincipalResourcePermissions = ( + output: any, + context: __SerdeContext +): PrincipalResourcePermissions => { + return { + AdditionalDetails: + output.AdditionalDetails !== undefined && output.AdditionalDetails !== null + ? deserializeAws_restJson1DetailsMap(output.AdditionalDetails, context) + : undefined, + Permissions: + output.Permissions !== undefined && output.Permissions !== null + ? 
deserializeAws_restJson1PermissionList(output.Permissions, context) + : undefined, + PermissionsWithGrantOption: + output.PermissionsWithGrantOption !== undefined && output.PermissionsWithGrantOption !== null + ? deserializeAws_restJson1PermissionList(output.PermissionsWithGrantOption, context) + : undefined, + Principal: + output.Principal !== undefined && output.Principal !== null + ? deserializeAws_restJson1DataLakePrincipal(output.Principal, context) + : undefined, + Resource: + output.Resource !== undefined && output.Resource !== null + ? deserializeAws_restJson1Resource(output.Resource, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1PrincipalResourcePermissionsList = ( + output: any, + context: __SerdeContext +): PrincipalResourcePermissions[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1PrincipalResourcePermissions(entry, context); + }); +}; + +const deserializeAws_restJson1Resource = (output: any, context: __SerdeContext): Resource => { + return { + Catalog: + output.Catalog !== undefined && output.Catalog !== null + ? deserializeAws_restJson1CatalogResource(output.Catalog, context) + : undefined, + DataCellsFilter: + output.DataCellsFilter !== undefined && output.DataCellsFilter !== null + ? deserializeAws_restJson1DataCellsFilterResource(output.DataCellsFilter, context) + : undefined, + DataLocation: + output.DataLocation !== undefined && output.DataLocation !== null + ? deserializeAws_restJson1DataLocationResource(output.DataLocation, context) + : undefined, + Database: + output.Database !== undefined && output.Database !== null + ? deserializeAws_restJson1DatabaseResource(output.Database, context) + : undefined, + LFTag: + output.LFTag !== undefined && output.LFTag !== null + ? deserializeAws_restJson1LFTagKeyResource(output.LFTag, context) + : undefined, + LFTagPolicy: + output.LFTagPolicy !== undefined && output.LFTagPolicy !== null + ? deserializeAws_restJson1LFTagPolicyResource(output.LFTagPolicy, context) + : undefined, + Table: + output.Table !== undefined && output.Table !== null + ? deserializeAws_restJson1TableResource(output.Table, context) + : undefined, + TableWithColumns: + output.TableWithColumns !== undefined && output.TableWithColumns !== null + ? deserializeAws_restJson1TableWithColumnsResource(output.TableWithColumns, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1ResourceInfo = (output: any, context: __SerdeContext): ResourceInfo => { + return { + LastModified: + output.LastModified !== undefined && output.LastModified !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.LastModified))) + : undefined, + ResourceArn: __expectString(output.ResourceArn), + RoleArn: __expectString(output.RoleArn), + } as any; +}; + +const deserializeAws_restJson1ResourceInfoList = (output: any, context: __SerdeContext): ResourceInfo[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ResourceInfo(entry, context); + }); +}; + +const deserializeAws_restJson1ResourceShareList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1RowFilter = (output: any, context: __SerdeContext): RowFilter => { + return { + AllRowsWildcard: + output.AllRowsWildcard !== undefined && output.AllRowsWildcard !== null + ? deserializeAws_restJson1AllRowsWildcard(output.AllRowsWildcard, context) + : undefined, + FilterExpression: __expectString(output.FilterExpression), + } as any; +}; + +const deserializeAws_restJson1StorageOptimizer = (output: any, context: __SerdeContext): StorageOptimizer => { + return { + Config: + output.Config !== undefined && output.Config !== null + ? deserializeAws_restJson1StorageOptimizerConfig(output.Config, context) + : undefined, + ErrorMessage: __expectString(output.ErrorMessage), + LastRunDetails: __expectString(output.LastRunDetails), + StorageOptimizerType: __expectString(output.StorageOptimizerType), + Warnings: __expectString(output.Warnings), + } as any; +}; + +const deserializeAws_restJson1StorageOptimizerConfig = ( + output: any, + context: __SerdeContext +): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1StorageOptimizerList = (output: any, context: __SerdeContext): StorageOptimizer[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1StorageOptimizer(entry, context); + }); +}; + +const deserializeAws_restJson1TableLFTagsList = (output: any, context: __SerdeContext): TaggedTable[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1TaggedTable(entry, context); + }); +}; + +const deserializeAws_restJson1TableObject = (output: any, context: __SerdeContext): TableObject => { + return { + ETag: __expectString(output.ETag), + Size: __expectLong(output.Size), + Uri: __expectString(output.Uri), + } as any; +}; + +const deserializeAws_restJson1TableObjectList = (output: any, context: __SerdeContext): TableObject[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1TableObject(entry, context); + }); +}; + +const deserializeAws_restJson1TableResource = (output: any, context: __SerdeContext): TableResource => { + return { + CatalogId: __expectString(output.CatalogId), + DatabaseName: __expectString(output.DatabaseName), + Name: __expectString(output.Name), + TableWildcard: + output.TableWildcard 
!== undefined && output.TableWildcard !== null + ? deserializeAws_restJson1TableWildcard(output.TableWildcard, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1TableWildcard = (output: any, context: __SerdeContext): TableWildcard => { + return {} as any; +}; + +const deserializeAws_restJson1TableWithColumnsResource = ( + output: any, + context: __SerdeContext +): TableWithColumnsResource => { + return { + CatalogId: __expectString(output.CatalogId), + ColumnNames: + output.ColumnNames !== undefined && output.ColumnNames !== null + ? deserializeAws_restJson1ColumnNames(output.ColumnNames, context) + : undefined, + ColumnWildcard: + output.ColumnWildcard !== undefined && output.ColumnWildcard !== null + ? deserializeAws_restJson1ColumnWildcard(output.ColumnWildcard, context) + : undefined, + DatabaseName: __expectString(output.DatabaseName), + Name: __expectString(output.Name), + } as any; +}; + +const deserializeAws_restJson1TaggedDatabase = (output: any, context: __SerdeContext): TaggedDatabase => { + return { + Database: + output.Database !== undefined && output.Database !== null + ? deserializeAws_restJson1DatabaseResource(output.Database, context) + : undefined, + LFTags: + output.LFTags !== undefined && output.LFTags !== null + ? deserializeAws_restJson1LFTagsList(output.LFTags, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1TaggedTable = (output: any, context: __SerdeContext): TaggedTable => { + return { + LFTagOnDatabase: + output.LFTagOnDatabase !== undefined && output.LFTagOnDatabase !== null + ? deserializeAws_restJson1LFTagsList(output.LFTagOnDatabase, context) + : undefined, + LFTagsOnColumns: + output.LFTagsOnColumns !== undefined && output.LFTagsOnColumns !== null + ? deserializeAws_restJson1ColumnLFTagsList(output.LFTagsOnColumns, context) + : undefined, + LFTagsOnTable: + output.LFTagsOnTable !== undefined && output.LFTagsOnTable !== null + ? deserializeAws_restJson1LFTagsList(output.LFTagsOnTable, context) + : undefined, + Table: + output.Table !== undefined && output.Table !== null + ? deserializeAws_restJson1TableResource(output.Table, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1TagValueList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1TransactionDescription = ( + output: any, + context: __SerdeContext +): TransactionDescription => { + return { + TransactionEndTime: + output.TransactionEndTime !== undefined && output.TransactionEndTime !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.TransactionEndTime))) + : undefined, + TransactionId: __expectString(output.TransactionId), + TransactionStartTime: + output.TransactionStartTime !== undefined && output.TransactionStartTime !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.TransactionStartTime))) + : undefined, + TransactionStatus: __expectString(output.TransactionStatus), + } as any; +}; + +const deserializeAws_restJson1TransactionDescriptionList = ( + output: any, + context: __SerdeContext +): TransactionDescription[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1TransactionDescription(entry, context); + }); +}; + +const deserializeAws_restJson1TrustedResourceOwners = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1WorkUnitRange = (output: any, context: __SerdeContext): WorkUnitRange => { + return { + WorkUnitIdMax: __expectLong(output.WorkUnitIdMax), + WorkUnitIdMin: __expectLong(output.WorkUnitIdMin), + WorkUnitToken: __expectString(output.WorkUnitToken), + } as any; +}; + +const deserializeAws_restJson1WorkUnitRangeList = (output: any, context: __SerdeContext): WorkUnitRange[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1WorkUnitRange(entry, context); + }); +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. +const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. +const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const isSerializableHeaderValue = (value: any): boolean => + value !== undefined && + value !== null && + value !== "" && + (!Object.getOwnPropertyNames(value).includes("length") || value.length != 0) && + (!Object.getOwnPropertyNames(value).includes("size") || value.size != 0); + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. 
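+ * The code is resolved, in order, from the x-amzn-errortype response header, the code field of the parsed body, and finally the body's __type field; any ":" suffix and "#" namespace prefix are stripped from the raw value before it is returned.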
+ */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-outposts/src/models/models_0.ts b/clients/client-outposts/src/models/models_0.ts index ca3137ba0860..1ce743678c70 100644 --- a/clients/client-outposts/src/models/models_0.ts +++ b/clients/client-outposts/src/models/models_0.ts @@ -559,6 +559,11 @@ export namespace ServiceQuotaExceededException { }); } +export enum SupportedHardwareType { + RACK = "RACK", + SERVER = "SERVER", +} + export interface CreateOutpostInput { /** *

                                  The name of the Outpost.

                                  @@ -589,6 +594,13 @@ export interface CreateOutpostInput { *

                                  The tags to apply to the Outpost.

                                  */ Tags?: { [key: string]: string }; + + /** + *

                                  + * The type of hardware for this Outpost. + *

                                  + */ + SupportedHardwareType?: SupportedHardwareType | string; } export namespace CreateOutpostInput { @@ -658,6 +670,13 @@ export interface Outpost { *

                                  The Amazon Resource Name (ARN) of the site.

                                  */ SiteArn?: string; + + /** + *

+ * The hardware type. + *

                                  + */ + SupportedHardwareType?: SupportedHardwareType | string; } export namespace Outpost { diff --git a/clients/client-outposts/src/protocols/Aws_restJson1.ts b/clients/client-outposts/src/protocols/Aws_restJson1.ts index a86837f5957e..7ebd660f8eea 100644 --- a/clients/client-outposts/src/protocols/Aws_restJson1.ts +++ b/clients/client-outposts/src/protocols/Aws_restJson1.ts @@ -147,6 +147,8 @@ export const serializeAws_restJson1CreateOutpostCommand = async ( ...(input.Description !== undefined && input.Description !== null && { Description: input.Description }), ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), ...(input.SiteId !== undefined && input.SiteId !== null && { SiteId: input.SiteId }), + ...(input.SupportedHardwareType !== undefined && + input.SupportedHardwareType !== null && { SupportedHardwareType: input.SupportedHardwareType }), ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_restJson1TagMap(input.Tags, context) }), }); return new __HttpRequest({ @@ -2909,6 +2911,7 @@ const deserializeAws_restJson1Outpost = (output: any, context: __SerdeContext): OwnerId: __expectString(output.OwnerId), SiteArn: __expectString(output.SiteArn), SiteId: __expectString(output.SiteId), + SupportedHardwareType: __expectString(output.SupportedHardwareType), Tags: output.Tags !== undefined && output.Tags !== null ? deserializeAws_restJson1TagMap(output.Tags, context) diff --git a/clients/client-rbin/.gitignore b/clients/client-rbin/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-rbin/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-rbin/LICENSE b/clients/client-rbin/LICENSE new file mode 100644 index 000000000000..f9e0c8672bca --- /dev/null +++ b/clients/client-rbin/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/clients/client-rbin/README.md b/clients/client-rbin/README.md new file mode 100644 index 000000000000..adf9545bf114 --- /dev/null +++ b/clients/client-rbin/README.md @@ -0,0 +1,216 @@ +# @aws-sdk/client-rbin + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-rbin/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-rbin) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-rbin.svg)](https://www.npmjs.com/package/@aws-sdk/client-rbin) + +## Description + +AWS SDK for JavaScript Rbin Client for Node.js, Browser and React Native. + +

This is the Recycle Bin API Reference. This documentation provides +descriptions and syntax for each of the actions and data types in Recycle Bin.

                                  + +

Recycle Bin is a snapshot recovery feature that enables you to restore accidentally +deleted snapshots. When using Recycle Bin, if your snapshots are deleted, they are retained +in the Recycle Bin for a time period that you specify.

                                  + +

You can restore a snapshot from the Recycle Bin at any time before its retention period +expires. After you restore a snapshot from the Recycle Bin, the snapshot is removed from the +Recycle Bin, and you can then use it in the same way you use any other snapshot in your +account. If the retention period expires and the snapshot is not restored, the snapshot is +permanently deleted from the Recycle Bin and is no longer available for recovery. For more +information about Recycle Bin, see +Recycle Bin in the Amazon EC2 User Guide.

+ +## Installing + +To install this package, simply add or install @aws-sdk/client-rbin +using your favorite package manager: + +- `npm install @aws-sdk/client-rbin` +- `yarn add @aws-sdk/client-rbin` +- `pnpm add @aws-sdk/client-rbin` + +## Getting Started + +### Import + +The AWS SDK is modularized by clients and commands. +To send a request, you only need to import the `RbinClient` and +the commands you need, for example `CreateRuleCommand`: + +```js +// ES5 example +const { RbinClient, CreateRuleCommand } = require("@aws-sdk/client-rbin"); +``` + +```ts +// ES6+ example +import { RbinClient, CreateRuleCommand } from "@aws-sdk/client-rbin"; +``` + +### Usage + +To send a request, you: + +- Initiate the client with configuration (e.g. credentials, region). +- Initiate the command with input parameters. +- Call the `send` operation on the client with the command object as input. +- If you are using a custom HTTP handler, you may call `destroy()` to close open connections. + +```js +// a client can be shared by different commands. +const client = new RbinClient({ region: "REGION" }); + +const params = { + /** input parameters */ +}; +const command = new CreateRuleCommand(params); +``` + +#### Async/await + +We recommend using the [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await) +operator to wait for the promise returned by the send operation as follows: + +```js +// async/await. +try { + const data = await client.send(command); + // process data. +} catch (error) { + // error handling. +} finally { + // finally. +} +``` + +Async-await is clean, concise, intuitive, easy to debug, and has better error handling +compared to Promise chains or callbacks. + +#### Promises + +You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining) +to execute the send operation. + +```js +client.send(command).then( + (data) => { + // process data. + }, + (error) => { + // error handling. + } +); +``` + +Promises can also be chained with `.catch()` and `.finally()` as follows: + +```js +client + .send(command) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }) + .finally(() => { + // finally. + }); +``` + +#### Callbacks + +We do not recommend using callbacks because of [callback hell](http://callbackhell.com/), +but they are supported by the send operation. + +```js +// callbacks. +client.send(command, (err, data) => { + // process err and data. +}); +``` + +#### v2 compatible style + +The client can also send requests using the v2 compatible style. +However, it results in a bigger bundle size and may be dropped in the next major version. More details are in the blog post +on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/). + +```ts +import * as AWS from "@aws-sdk/client-rbin"; +const client = new AWS.Rbin({ region: "REGION" }); + +// async/await. +try { + const data = await client.createRule(params); + // process data. +} catch (error) { + // error handling. +} + +// Promises. +client + .createRule(params) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }); + +// callbacks. +client.createRule(params, (err, data) => { + // process err and data. +}); +``` + +### Troubleshooting + +When the service returns an exception, the error will include the exception information, +as well as response metadata (e.g. request id).
+ +```js +try { + const data = await client.send(command); + // process data. +} catch (error) { + const { requestId, cfId, extendedRequestId } = error.$metadata; + console.log({ requestId, cfId, extendedRequestId }); + /** + * The keys within exceptions are also parsed. + * You can access them by specifying exception names: + * if (error.name === 'SomeServiceException') { + * const value = error.specialKeyInException; + * } + */ +} +``` + +## Getting Help + +Please use these community resources for getting help. +We use the GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them. + +- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html) + or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html). +- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/) + on AWS Developer Blog. +- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`. +- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3). +- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose). + +To test your universal JavaScript code in Node.js, browser and react-native environments, +visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests). + +## Contributing + +This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-rbin` package is updated. +To contribute to client you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE for more information. 
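The README's Getting Started snippet stops at constructing the command. For orientation, here is a minimal end-to-end sketch using the new Recycle Bin client; the CreateRule/ListRules input and output fields shown (`ResourceType`, `RetentionPeriod`, `Identifier`, `Rules`) are assumptions about the Recycle Bin API shapes and are not taken from this patch.

```ts
import { CreateRuleCommand, ListRulesCommand, RbinClient } from "@aws-sdk/client-rbin";

// A single client instance can be reused for every command.
const client = new RbinClient({ region: "us-east-1" });

const run = async () => {
  // Create a retention rule that keeps deleted EBS snapshots for 7 days.
  // The field names below are assumed, not taken from this patch.
  const created = await client.send(
    new CreateRuleCommand({
      ResourceType: "EBS_SNAPSHOT",
      RetentionPeriod: { RetentionPeriodValue: 7, RetentionPeriodUnit: "DAYS" },
      Description: "Keep deleted snapshots for one week",
    })
  );
  console.log("Created retention rule:", created.Identifier);

  // List the retention rules for that resource type in the Region.
  const listed = await client.send(new ListRulesCommand({ ResourceType: "EBS_SNAPSHOT" }));
  console.log("Rules in this Region:", listed.Rules?.length ?? 0);
};

run().catch((error) => {
  // Request ID and other response metadata are attached to the error,
  // as described in the Troubleshooting section above.
  console.error(error.name, error.$metadata?.requestId);
});
```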
diff --git a/clients/client-rbin/jest.config.js b/clients/client-rbin/jest.config.js new file mode 100644 index 000000000000..02eed352c6a8 --- /dev/null +++ b/clients/client-rbin/jest.config.js @@ -0,0 +1,4 @@ +module.exports = { + preset: "ts-jest", + testMatch: ["**/*.spec.ts", "!**/*.browser.spec.ts", "!**/*.integ.spec.ts"], +}; diff --git a/clients/client-rbin/package.json b/clients/client-rbin/package.json new file mode 100644 index 000000000000..a171ab4bd174 --- /dev/null +++ b/clients/client-rbin/package.json @@ -0,0 +1,94 @@ +{ + "name": "@aws-sdk/client-rbin", + "description": "AWS SDK for JavaScript Rbin Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "yarn build:cjs && yarn build:es && yarn build:types", + "build:cjs": "tsc -p tsconfig.json", + "build:docs": "yarn clean:docs && typedoc ./", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "clean": "yarn clean:dist && yarn clean:docs", + "clean:dist": "rimraf ./dist-*", + "clean:docs": "rimraf ./docs", + "downlevel-dts": "downlevel-dts dist-types dist-types/ts3.4", + "test": "jest --coverage --passWithNoTests" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "3.43.0", + "@aws-sdk/config-resolver": "3.40.0", + "@aws-sdk/credential-provider-node": "3.41.0", + "@aws-sdk/fetch-http-handler": "3.40.0", + "@aws-sdk/hash-node": "3.40.0", + "@aws-sdk/invalid-dependency": "3.40.0", + "@aws-sdk/middleware-content-length": "3.40.0", + "@aws-sdk/middleware-host-header": "3.40.0", + "@aws-sdk/middleware-logger": "3.40.0", + "@aws-sdk/middleware-retry": "3.40.0", + "@aws-sdk/middleware-serde": "3.40.0", + "@aws-sdk/middleware-signing": "3.40.0", + "@aws-sdk/middleware-stack": "3.40.0", + "@aws-sdk/middleware-user-agent": "3.40.0", + "@aws-sdk/node-config-provider": "3.40.0", + "@aws-sdk/node-http-handler": "3.40.0", + "@aws-sdk/protocol-http": "3.40.0", + "@aws-sdk/smithy-client": "3.41.0", + "@aws-sdk/types": "3.40.0", + "@aws-sdk/url-parser": "3.40.0", + "@aws-sdk/util-base64-browser": "3.37.0", + "@aws-sdk/util-base64-node": "3.37.0", + "@aws-sdk/util-body-length-browser": "3.37.0", + "@aws-sdk/util-body-length-node": "3.37.0", + "@aws-sdk/util-user-agent-browser": "3.40.0", + "@aws-sdk/util-user-agent-node": "3.40.0", + "@aws-sdk/util-utf8-browser": "3.37.0", + "@aws-sdk/util-utf8-node": "3.37.0", + "tslib": "^2.3.0" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "3.38.0", + "@types/node": "^12.7.5", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-rbin", + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-rbin" + } +} diff 
--git a/clients/client-rbin/src/Rbin.ts b/clients/client-rbin/src/Rbin.ts new file mode 100644 index 000000000000..e5d04c03cfef --- /dev/null +++ b/clients/client-rbin/src/Rbin.ts @@ -0,0 +1,260 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { CreateRuleCommand, CreateRuleCommandInput, CreateRuleCommandOutput } from "./commands/CreateRuleCommand"; +import { DeleteRuleCommand, DeleteRuleCommandInput, DeleteRuleCommandOutput } from "./commands/DeleteRuleCommand"; +import { GetRuleCommand, GetRuleCommandInput, GetRuleCommandOutput } from "./commands/GetRuleCommand"; +import { ListRulesCommand, ListRulesCommandInput, ListRulesCommandOutput } from "./commands/ListRulesCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { UpdateRuleCommand, UpdateRuleCommandInput, UpdateRuleCommandOutput } from "./commands/UpdateRuleCommand"; +import { RbinClient } from "./RbinClient"; + +/** + *

This is the Recycle Bin API Reference. This documentation provides + * descriptions and syntax for each of the actions and data types in Recycle Bin.

                                  + * + *

Recycle Bin is a snapshot recovery feature that enables you to restore accidentally + * deleted snapshots. When using Recycle Bin, if your snapshots are deleted, they are retained + * in the Recycle Bin for a time period that you specify.

                                  + * + *

You can restore a snapshot from the Recycle Bin at any time before its retention period + * expires. After you restore a snapshot from the Recycle Bin, the snapshot is removed from the + * Recycle Bin, and you can then use it in the same way you use any other snapshot in your + * account. If the retention period expires and the snapshot is not restored, the snapshot is + * permanently deleted from the Recycle Bin and is no longer available for recovery. For more + * information about Recycle Bin, see + * Recycle Bin in the Amazon EC2 User Guide.

                                  + */ +export class Rbin extends RbinClient { + /** + *

Creates a Recycle Bin retention rule. For more information, see + * Create Recycle Bin retention rules in the Amazon EC2 User Guide.

                                  + */ + public createRule(args: CreateRuleCommandInput, options?: __HttpHandlerOptions): Promise; + public createRule(args: CreateRuleCommandInput, cb: (err: any, data?: CreateRuleCommandOutput) => void): void; + public createRule( + args: CreateRuleCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateRuleCommandOutput) => void + ): void; + public createRule( + args: CreateRuleCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateRuleCommandOutput) => void), + cb?: (err: any, data?: CreateRuleCommandOutput) => void + ): Promise | void { + const command = new CreateRuleCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Deletes a Recycle Bin retention rule. For more information, see + * Delete Recycle Bin retention rules in the Amazon EC2 User Guide.

                                  + */ + public deleteRule(args: DeleteRuleCommandInput, options?: __HttpHandlerOptions): Promise; + public deleteRule(args: DeleteRuleCommandInput, cb: (err: any, data?: DeleteRuleCommandOutput) => void): void; + public deleteRule( + args: DeleteRuleCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteRuleCommandOutput) => void + ): void; + public deleteRule( + args: DeleteRuleCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteRuleCommandOutput) => void), + cb?: (err: any, data?: DeleteRuleCommandOutput) => void + ): Promise | void { + const command = new DeleteRuleCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Gets information about a Recycle Bin retention rule.

                                  + */ + public getRule(args: GetRuleCommandInput, options?: __HttpHandlerOptions): Promise; + public getRule(args: GetRuleCommandInput, cb: (err: any, data?: GetRuleCommandOutput) => void): void; + public getRule( + args: GetRuleCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetRuleCommandOutput) => void + ): void; + public getRule( + args: GetRuleCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetRuleCommandOutput) => void), + cb?: (err: any, data?: GetRuleCommandOutput) => void + ): Promise | void { + const command = new GetRuleCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists the Recycle Bin retention rules in the Region.

                                  + */ + public listRules(args: ListRulesCommandInput, options?: __HttpHandlerOptions): Promise; + public listRules(args: ListRulesCommandInput, cb: (err: any, data?: ListRulesCommandOutput) => void): void; + public listRules( + args: ListRulesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListRulesCommandOutput) => void + ): void; + public listRules( + args: ListRulesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListRulesCommandOutput) => void), + cb?: (err: any, data?: ListRulesCommandOutput) => void + ): Promise | void { + const command = new ListRulesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Lists the tags assigned to a specific resource.

                                  + */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTagsForResourceCommandOutput) => void), + cb?: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): Promise | void { + const command = new ListTagsForResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Assigns tags to the specified resource.

                                  + */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise; + public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void; + public tagResource( + args: TagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TagResourceCommandOutput) => void + ): void; + public tagResource( + args: TagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TagResourceCommandOutput) => void), + cb?: (err: any, data?: TagResourceCommandOutput) => void + ): Promise | void { + const command = new TagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Unassigns a tag from a resource.

                                  + */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public untagResource( + args: UntagResourceCommandInput, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UntagResourceCommandOutput) => void), + cb?: (err: any, data?: UntagResourceCommandOutput) => void + ): Promise | void { + const command = new UntagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Updates an existing Recycle Bin retention rule. For more information, see + * Update Recycle Bin retention rules in the Amazon EC2 User Guide.

                                  + */ + public updateRule(args: UpdateRuleCommandInput, options?: __HttpHandlerOptions): Promise; + public updateRule(args: UpdateRuleCommandInput, cb: (err: any, data?: UpdateRuleCommandOutput) => void): void; + public updateRule( + args: UpdateRuleCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateRuleCommandOutput) => void + ): void; + public updateRule( + args: UpdateRuleCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateRuleCommandOutput) => void), + cb?: (err: any, data?: UpdateRuleCommandOutput) => void + ): Promise | void { + const command = new UpdateRuleCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } +} diff --git a/clients/client-rbin/src/RbinClient.ts b/clients/client-rbin/src/RbinClient.ts new file mode 100644 index 000000000000..9009c92634ea --- /dev/null +++ b/clients/client-rbin/src/RbinClient.ts @@ -0,0 +1,286 @@ +import { + EndpointsInputConfig, + EndpointsResolvedConfig, + RegionInputConfig, + RegionResolvedConfig, + resolveEndpointsConfig, + resolveRegionConfig, +} from "@aws-sdk/config-resolver"; +import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length"; +import { + getHostHeaderPlugin, + HostHeaderInputConfig, + HostHeaderResolvedConfig, + resolveHostHeaderConfig, +} from "@aws-sdk/middleware-host-header"; +import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; +import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry"; +import { + AwsAuthInputConfig, + AwsAuthResolvedConfig, + getAwsAuthPlugin, + resolveAwsAuthConfig, +} from "@aws-sdk/middleware-signing"; +import { + getUserAgentPlugin, + resolveUserAgentConfig, + UserAgentInputConfig, + UserAgentResolvedConfig, +} from "@aws-sdk/middleware-user-agent"; +import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http"; +import { + Client as __Client, + SmithyConfiguration as __SmithyConfiguration, + SmithyResolvedConfiguration as __SmithyResolvedConfiguration, +} from "@aws-sdk/smithy-client"; +import { + Credentials as __Credentials, + Decoder as __Decoder, + Encoder as __Encoder, + Hash as __Hash, + HashConstructor as __HashConstructor, + HttpHandlerOptions as __HttpHandlerOptions, + Logger as __Logger, + Provider as __Provider, + Provider, + RegionInfoProvider, + StreamCollector as __StreamCollector, + UrlParser as __UrlParser, + UserAgent as __UserAgent, +} from "@aws-sdk/types"; + +import { CreateRuleCommandInput, CreateRuleCommandOutput } from "./commands/CreateRuleCommand"; +import { DeleteRuleCommandInput, DeleteRuleCommandOutput } from "./commands/DeleteRuleCommand"; +import { GetRuleCommandInput, GetRuleCommandOutput } from "./commands/GetRuleCommand"; +import { ListRulesCommandInput, ListRulesCommandOutput } from "./commands/ListRulesCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { UpdateRuleCommandInput, UpdateRuleCommandOutput } from 
"./commands/UpdateRuleCommand"; +import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; + +export type ServiceInputTypes = + | CreateRuleCommandInput + | DeleteRuleCommandInput + | GetRuleCommandInput + | ListRulesCommandInput + | ListTagsForResourceCommandInput + | TagResourceCommandInput + | UntagResourceCommandInput + | UpdateRuleCommandInput; + +export type ServiceOutputTypes = + | CreateRuleCommandOutput + | DeleteRuleCommandOutput + | GetRuleCommandOutput + | ListRulesCommandOutput + | ListTagsForResourceCommandOutput + | TagResourceCommandOutput + | UntagResourceCommandOutput + | UpdateRuleCommandOutput; + +export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { + /** + * The HTTP handler to use. Fetch in browser and Https in Nodejs. + */ + requestHandler?: __HttpHandler; + + /** + * A constructor for a class implementing the {@link __Hash} interface + * that computes the SHA-256 HMAC or checksum of a string or binary buffer. + * @internal + */ + sha256?: __HashConstructor; + + /** + * The function that will be used to convert strings into HTTP endpoints. + * @internal + */ + urlParser?: __UrlParser; + + /** + * A function that can calculate the length of a request body. + * @internal + */ + bodyLengthChecker?: (body: any) => number | undefined; + + /** + * A function that converts a stream into an array of bytes. + * @internal + */ + streamCollector?: __StreamCollector; + + /** + * The function that will be used to convert a base64-encoded string to a byte array. + * @internal + */ + base64Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a base64-encoded string. + * @internal + */ + base64Encoder?: __Encoder; + + /** + * The function that will be used to convert a UTF8-encoded string to a byte array. + * @internal + */ + utf8Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a UTF-8 encoded string. + * @internal + */ + utf8Encoder?: __Encoder; + + /** + * The runtime environment. + * @internal + */ + runtime?: string; + + /** + * Disable dyanamically changing the endpoint of the client based on the hostPrefix + * trait of an operation. + */ + disableHostPrefix?: boolean; + + /** + * Value for how many times a request will be made at most in case of retry. + */ + maxAttempts?: number | __Provider; + + /** + * Specifies which retry algorithm to use. + */ + retryMode?: string | __Provider; + + /** + * Optional logger for logging debug/info/warn/error. + */ + logger?: __Logger; + + /** + * Enables IPv6/IPv4 dualstack endpoint. + */ + useDualstackEndpoint?: boolean | __Provider; + + /** + * Enables FIPS compatible endpoints. + */ + useFipsEndpoint?: boolean | __Provider; + + /** + * Unique service identifier. + * @internal + */ + serviceId?: string; + + /** + * The AWS region to which this client will send requests + */ + region?: string | __Provider; + + /** + * Default credentials provider; Not available in browser runtime. + * @internal + */ + credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; + + /** + * Fetch related hostname, signing name or signing region with given region. 
+ * @internal + */ + regionInfoProvider?: RegionInfoProvider; + + /** + * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header + * @internal + */ + defaultUserAgentProvider?: Provider<__UserAgent>; +} + +type RbinClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & + ClientDefaults & + RegionInputConfig & + EndpointsInputConfig & + RetryInputConfig & + HostHeaderInputConfig & + AwsAuthInputConfig & + UserAgentInputConfig; +/** + * The configuration interface of RbinClient class constructor that set the region, credentials and other options. + */ +export interface RbinClientConfig extends RbinClientConfigType {} + +type RbinClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & + Required & + RegionResolvedConfig & + EndpointsResolvedConfig & + RetryResolvedConfig & + HostHeaderResolvedConfig & + AwsAuthResolvedConfig & + UserAgentResolvedConfig; +/** + * The resolved configuration interface of RbinClient class. This is resolved and normalized from the {@link RbinClientConfig | constructor configuration interface}. + */ +export interface RbinClientResolvedConfig extends RbinClientResolvedConfigType {} + +/** + *

This is the Recycle Bin API Reference. This documentation provides + * descriptions and syntax for each of the actions and data types in Recycle Bin.

                                  + * + *

Recycle Bin is a snapshot recovery feature that enables you to restore accidentally + * deleted snapshots. When using Recycle Bin, if your snapshots are deleted, they are retained + * in the Recycle Bin for a time period that you specify.

                                  + * + *

You can restore a snapshot from the Recycle Bin at any time before its retention period + * expires. After you restore a snapshot from the Recycle Bin, the snapshot is removed from the + * Recycle Bin, and you can then use it in the same way you use any other snapshot in your + * account. If the retention period expires and the snapshot is not restored, the snapshot is + * permanently deleted from the Recycle Bin and is no longer available for recovery. For more + * information about Recycle Bin, see + * Recycle Bin in the Amazon EC2 User Guide.

                                  + */ +export class RbinClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + RbinClientResolvedConfig +> { + /** + * The resolved configuration of RbinClient class. This is resolved and normalized from the {@link RbinClientConfig | constructor configuration interface}. + */ + readonly config: RbinClientResolvedConfig; + + constructor(configuration: RbinClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. + */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-rbin/src/commands/CreateRuleCommand.ts b/clients/client-rbin/src/commands/CreateRuleCommand.ts new file mode 100644 index 000000000000..12d0767af4a3 --- /dev/null +++ b/clients/client-rbin/src/commands/CreateRuleCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateRuleRequest, CreateRuleResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateRuleCommand, + serializeAws_restJson1CreateRuleCommand, +} from "../protocols/Aws_restJson1"; +import { RbinClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RbinClient"; + +export interface CreateRuleCommandInput extends CreateRuleRequest {} +export interface CreateRuleCommandOutput extends CreateRuleResponse, __MetadataBearer {} + +/** + *

Creates a Recycle Bin retention rule. For more information, see + * Create Recycle Bin retention rules in the Amazon EC2 User Guide.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RbinClient, CreateRuleCommand } from "@aws-sdk/client-rbin"; // ES Modules import + * // const { RbinClient, CreateRuleCommand } = require("@aws-sdk/client-rbin"); // CommonJS import + * const client = new RbinClient(config); + * const command = new CreateRuleCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateRuleCommandInput} for command's `input` shape. + * @see {@link CreateRuleCommandOutput} for command's `response` shape. + * @see {@link RbinClientResolvedConfig | config} for RbinClient's `config` shape. + * + */ +export class CreateRuleCommand extends $Command< + CreateRuleCommandInput, + CreateRuleCommandOutput, + RbinClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateRuleCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RbinClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RbinClient"; + const commandName = "CreateRuleCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateRuleRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateRuleResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateRuleCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateRuleCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateRuleCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rbin/src/commands/DeleteRuleCommand.ts b/clients/client-rbin/src/commands/DeleteRuleCommand.ts new file mode 100644 index 000000000000..c7cf5bb3ebc6 --- /dev/null +++ b/clients/client-rbin/src/commands/DeleteRuleCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteRuleRequest, DeleteRuleResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteRuleCommand, + serializeAws_restJson1DeleteRuleCommand, +} from "../protocols/Aws_restJson1"; +import { RbinClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RbinClient"; + +export interface DeleteRuleCommandInput extends DeleteRuleRequest {} +export interface DeleteRuleCommandOutput 
extends DeleteRuleResponse, __MetadataBearer {} + +/** + *

Deletes a Recycle Bin retention rule. For more information, see + * Delete Recycle Bin retention rules in the Amazon EC2 User Guide.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RbinClient, DeleteRuleCommand } from "@aws-sdk/client-rbin"; // ES Modules import + * // const { RbinClient, DeleteRuleCommand } = require("@aws-sdk/client-rbin"); // CommonJS import + * const client = new RbinClient(config); + * const command = new DeleteRuleCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteRuleCommandInput} for command's `input` shape. + * @see {@link DeleteRuleCommandOutput} for command's `response` shape. + * @see {@link RbinClientResolvedConfig | config} for RbinClient's `config` shape. + * + */ +export class DeleteRuleCommand extends $Command< + DeleteRuleCommandInput, + DeleteRuleCommandOutput, + RbinClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteRuleCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RbinClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RbinClient"; + const commandName = "DeleteRuleCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteRuleRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteRuleResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteRuleCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteRuleCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteRuleCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rbin/src/commands/GetRuleCommand.ts b/clients/client-rbin/src/commands/GetRuleCommand.ts new file mode 100644 index 000000000000..72362ac99a99 --- /dev/null +++ b/clients/client-rbin/src/commands/GetRuleCommand.ts @@ -0,0 +1,91 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetRuleRequest, GetRuleResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetRuleCommand, + serializeAws_restJson1GetRuleCommand, +} from "../protocols/Aws_restJson1"; +import { RbinClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RbinClient"; + +export interface GetRuleCommandInput extends GetRuleRequest {} +export interface GetRuleCommandOutput extends GetRuleResponse, 
__MetadataBearer {} + +/** + *

Gets information about a Recycle Bin retention rule.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RbinClient, GetRuleCommand } from "@aws-sdk/client-rbin"; // ES Modules import + * // const { RbinClient, GetRuleCommand } = require("@aws-sdk/client-rbin"); // CommonJS import + * const client = new RbinClient(config); + * const command = new GetRuleCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetRuleCommandInput} for command's `input` shape. + * @see {@link GetRuleCommandOutput} for command's `response` shape. + * @see {@link RbinClientResolvedConfig | config} for RbinClient's `config` shape. + * + */ +export class GetRuleCommand extends $Command { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetRuleCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RbinClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RbinClient"; + const commandName = "GetRuleCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetRuleRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetRuleResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetRuleCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetRuleCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetRuleCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rbin/src/commands/ListRulesCommand.ts b/clients/client-rbin/src/commands/ListRulesCommand.ts new file mode 100644 index 000000000000..a3f882b813cf --- /dev/null +++ b/clients/client-rbin/src/commands/ListRulesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListRulesRequest, ListRulesResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListRulesCommand, + serializeAws_restJson1ListRulesCommand, +} from "../protocols/Aws_restJson1"; +import { RbinClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RbinClient"; + +export interface ListRulesCommandInput extends ListRulesRequest {} +export interface ListRulesCommandOutput extends ListRulesResponse, __MetadataBearer {} + +/** + *

                                  Lists the Recycle Bin retention rules in the Region.
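A short sketch of listing retention rules, including the paginateListRules helper added later in this change; the region is a placeholder, and the resource type value comes from the ResourceType enum in models_0.ts.

```typescript
import { ListRulesCommand, paginateListRules, RbinClient } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" }); // placeholder region

// Single page: ListRulesRequest requires a ResourceType (only EBS_SNAPSHOT is supported).
const firstPage = await client.send(new ListRulesCommand({ ResourceType: "EBS_SNAPSHOT" }));
console.log(firstPage.Rules, firstPage.NextToken);

// All pages: the paginator drives NextToken/MaxResults internally (MaxResults accepts 5-500).
for await (const page of paginateListRules({ client, pageSize: 100 }, { ResourceType: "EBS_SNAPSHOT" })) {
  for (const rule of page.Rules ?? []) {
    console.log(rule.Identifier, rule.Description);
  }
}
```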

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RbinClient, ListRulesCommand } from "@aws-sdk/client-rbin"; // ES Modules import + * // const { RbinClient, ListRulesCommand } = require("@aws-sdk/client-rbin"); // CommonJS import + * const client = new RbinClient(config); + * const command = new ListRulesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListRulesCommandInput} for command's `input` shape. + * @see {@link ListRulesCommandOutput} for command's `response` shape. + * @see {@link RbinClientResolvedConfig | config} for RbinClient's `config` shape. + * + */ +export class ListRulesCommand extends $Command< + ListRulesCommandInput, + ListRulesCommandOutput, + RbinClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListRulesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RbinClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RbinClient"; + const commandName = "ListRulesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListRulesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListRulesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListRulesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListRulesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListRulesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rbin/src/commands/ListTagsForResourceCommand.ts b/clients/client-rbin/src/commands/ListTagsForResourceCommand.ts new file mode 100644 index 000000000000..5dbe71fa4f75 --- /dev/null +++ b/clients/client-rbin/src/commands/ListTagsForResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListTagsForResourceRequest, ListTagsForResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTagsForResourceCommand, + serializeAws_restJson1ListTagsForResourceCommand, +} from "../protocols/Aws_restJson1"; +import { RbinClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RbinClient"; + +export interface ListTagsForResourceCommandInput extends 
ListTagsForResourceRequest {} +export interface ListTagsForResourceCommandOutput extends ListTagsForResourceResponse, __MetadataBearer {} + +/** + *

Lists the tags assigned to a specific resource.
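A minimal sketch of reading a rule's tags; the region and ARN are placeholders, since this change does not spell out the retention-rule ARN format.

```typescript
import { ListTagsForResourceCommand, RbinClient } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" }); // placeholder region
const ruleArn = "<retention-rule ARN>";                 // placeholder ARN

const { Tags } = await client.send(new ListTagsForResourceCommand({ ResourceArn: ruleArn }));
for (const tag of Tags ?? []) {
  console.log(`${tag.Key}=${tag.Value}`);
}
```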

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RbinClient, ListTagsForResourceCommand } from "@aws-sdk/client-rbin"; // ES Modules import + * // const { RbinClient, ListTagsForResourceCommand } = require("@aws-sdk/client-rbin"); // CommonJS import + * const client = new RbinClient(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link RbinClientResolvedConfig | config} for RbinClient's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + RbinClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RbinClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RbinClient"; + const commandName = "ListTagsForResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTagsForResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTagsForResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTagsForResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTagsForResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListTagsForResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rbin/src/commands/TagResourceCommand.ts b/clients/client-rbin/src/commands/TagResourceCommand.ts new file mode 100644 index 000000000000..689ab4f9a561 --- /dev/null +++ b/clients/client-rbin/src/commands/TagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { TagResourceRequest, TagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1TagResourceCommand, + serializeAws_restJson1TagResourceCommand, +} from "../protocols/Aws_restJson1"; +import { RbinClientResolvedConfig, ServiceInputTypes, 
ServiceOutputTypes } from "../RbinClient"; + +export interface TagResourceCommandInput extends TagResourceRequest {} +export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} + +/** + *

                                  Assigns tags to the specified resource.
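A minimal sketch of tagging a retention rule; the region, ARN, and tag values are placeholders. Exceeding the tags-per-resource quota surfaces as the ServiceQuotaExceededException modeled in models_0.ts.

```typescript
import { RbinClient, TagResourceCommand } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" }); // placeholder region

// TagResourceRequest takes the rule ARN and a list of Key/Value pairs (both fields are required).
await client.send(
  new TagResourceCommand({
    ResourceArn: "<retention-rule ARN>", // placeholder
    Tags: [{ Key: "team", Value: "storage" }],
  })
);
```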

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RbinClient, TagResourceCommand } from "@aws-sdk/client-rbin"; // ES Modules import + * // const { RbinClient, TagResourceCommand } = require("@aws-sdk/client-rbin"); // CommonJS import + * const client = new RbinClient(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link RbinClientResolvedConfig | config} for RbinClient's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + RbinClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RbinClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RbinClient"; + const commandName = "TagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: TagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: TagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1TagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1TagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rbin/src/commands/UntagResourceCommand.ts b/clients/client-rbin/src/commands/UntagResourceCommand.ts new file mode 100644 index 000000000000..017ba5d5dfb9 --- /dev/null +++ b/clients/client-rbin/src/commands/UntagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UntagResourceRequest, UntagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UntagResourceCommand, + serializeAws_restJson1UntagResourceCommand, +} from "../protocols/Aws_restJson1"; +import { RbinClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RbinClient"; + +export interface UntagResourceCommandInput extends UntagResourceRequest {} 
+export interface UntagResourceCommandOutput extends UntagResourceResponse, __MetadataBearer {} + +/** + *

                                  Unassigns a tag from a resource.
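A minimal sketch of removing a tag; the region, ARN, and key are placeholders. The generated serializer sends the keys as the tagKeys query parameter on a DELETE request.

```typescript
import { RbinClient, UntagResourceCommand } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" }); // placeholder region

// UntagResourceRequest takes the rule ARN and the tag keys to remove.
await client.send(
  new UntagResourceCommand({
    ResourceArn: "<retention-rule ARN>", // placeholder
    TagKeys: ["team"],
  })
);
```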

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RbinClient, UntagResourceCommand } from "@aws-sdk/client-rbin"; // ES Modules import + * // const { RbinClient, UntagResourceCommand } = require("@aws-sdk/client-rbin"); // CommonJS import + * const client = new RbinClient(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link RbinClientResolvedConfig | config} for RbinClient's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + RbinClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RbinClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RbinClient"; + const commandName = "UntagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UntagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: UntagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UntagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UntagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UntagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rbin/src/commands/UpdateRuleCommand.ts b/clients/client-rbin/src/commands/UpdateRuleCommand.ts new file mode 100644 index 000000000000..01dac4877c1e --- /dev/null +++ b/clients/client-rbin/src/commands/UpdateRuleCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UpdateRuleRequest, UpdateRuleResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateRuleCommand, + serializeAws_restJson1UpdateRuleCommand, +} from "../protocols/Aws_restJson1"; +import { RbinClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RbinClient"; + +export interface UpdateRuleCommandInput extends UpdateRuleRequest 
{} +export interface UpdateRuleCommandOutput extends UpdateRuleResponse, __MetadataBearer {} + +/** + *

Updates an existing Recycle Bin retention rule. For more information, see Update Recycle Bin retention rules in the Amazon EC2 User Guide.
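A minimal sketch of updating a rule's retention period; the identifier, region, and values are placeholders. The generated serializer issues a PATCH to /rules/{Identifier}.

```typescript
import { RbinClient, UpdateRuleCommand } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" }); // placeholder region

// UpdateRuleRequest requires the rule Identifier; the other fields are optional updates.
const response = await client.send(
  new UpdateRuleCommand({
    Identifier: "EXAMPLE_RULE_ID", // placeholder
    RetentionPeriod: { RetentionPeriodValue: 14, RetentionPeriodUnit: "DAYS" },
    Description: "Retain deleted EBS snapshots for 14 days",
  })
);
console.log(response.Status); // "pending" or "available", per the RuleStatus enum
```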

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RbinClient, UpdateRuleCommand } from "@aws-sdk/client-rbin"; // ES Modules import + * // const { RbinClient, UpdateRuleCommand } = require("@aws-sdk/client-rbin"); // CommonJS import + * const client = new RbinClient(config); + * const command = new UpdateRuleCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateRuleCommandInput} for command's `input` shape. + * @see {@link UpdateRuleCommandOutput} for command's `response` shape. + * @see {@link RbinClientResolvedConfig | config} for RbinClient's `config` shape. + * + */ +export class UpdateRuleCommand extends $Command< + UpdateRuleCommandInput, + UpdateRuleCommandOutput, + RbinClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateRuleCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RbinClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RbinClient"; + const commandName = "UpdateRuleCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateRuleRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateRuleResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateRuleCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateRuleCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateRuleCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rbin/src/commands/index.ts b/clients/client-rbin/src/commands/index.ts new file mode 100644 index 000000000000..ec3b5e1206ac --- /dev/null +++ b/clients/client-rbin/src/commands/index.ts @@ -0,0 +1,8 @@ +export * from "./CreateRuleCommand"; +export * from "./DeleteRuleCommand"; +export * from "./GetRuleCommand"; +export * from "./ListRulesCommand"; +export * from "./ListTagsForResourceCommand"; +export * from "./TagResourceCommand"; +export * from "./UntagResourceCommand"; +export * from "./UpdateRuleCommand"; diff --git a/clients/client-rbin/src/endpoints.ts b/clients/client-rbin/src/endpoints.ts new file mode 100644 index 000000000000..1ffa0a4e5702 --- /dev/null +++ b/clients/client-rbin/src/endpoints.ts @@ -0,0 +1,134 @@ +import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; +import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; + +const regionHash: RegionHash = {}; + +const partitionHash: PartitionHash = { + aws: { + regions: [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + 
"ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ], + regionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rbin.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "rbin-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "rbin-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "rbin.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, + "aws-cn": { + regions: ["cn-north-1", "cn-northwest-1"], + regionRegex: "^cn\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rbin.{region}.amazonaws.com.cn", + tags: [], + }, + { + hostname: "rbin-fips.{region}.amazonaws.com.cn", + tags: ["fips"], + }, + { + hostname: "rbin-fips.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack", "fips"], + }, + { + hostname: "rbin.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "aws-iso": { + regions: ["us-iso-east-1", "us-iso-west-1"], + regionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rbin.{region}.c2s.ic.gov", + tags: [], + }, + { + hostname: "rbin-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, + ], + }, + "aws-iso-b": { + regions: ["us-isob-east-1"], + regionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rbin.{region}.sc2s.sgov.gov", + tags: [], + }, + { + hostname: "rbin-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, + ], + }, + "aws-us-gov": { + regions: ["us-gov-east-1", "us-gov-west-1"], + regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rbin.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "rbin-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "rbin-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "rbin.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, +}; + +export const defaultRegionInfoProvider: RegionInfoProvider = async ( + region: string, + options?: RegionInfoProviderOptions +) => + getRegionInfo(region, { + ...options, + signingService: "rbin", + regionHash, + partitionHash, + }); diff --git a/clients/client-rbin/src/index.ts b/clients/client-rbin/src/index.ts new file mode 100644 index 000000000000..699a868c21f7 --- /dev/null +++ b/clients/client-rbin/src/index.ts @@ -0,0 +1,5 @@ +export * from "./Rbin"; +export * from "./RbinClient"; +export * from "./commands"; +export * from "./models"; +export * from "./pagination"; diff --git a/clients/client-rbin/src/models/index.ts b/clients/client-rbin/src/models/index.ts new file mode 100644 index 000000000000..09c5d6e09b8c --- /dev/null +++ b/clients/client-rbin/src/models/index.ts @@ -0,0 +1 @@ +export * from "./models_0"; diff --git a/clients/client-rbin/src/models/models_0.ts b/clients/client-rbin/src/models/models_0.ts new file mode 100644 index 000000000000..ef80ad773304 --- /dev/null +++ b/clients/client-rbin/src/models/models_0.ts @@ -0,0 +1,619 @@ +import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; + +/** + *

                                  Information about a resource tag used to identify resources that are to be retained by a Recycle Bin retention rule.

                                  + */ +export interface ResourceTag { + /** + *

                                  The tag key.

                                  + */ + ResourceTagKey: string | undefined; + + /** + *

                                  The tag value.

                                  + */ + ResourceTagValue?: string; +} + +export namespace ResourceTag { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceTag): any => ({ + ...obj, + }); +} + +export enum ResourceType { + EBS_SNAPSHOT = "EBS_SNAPSHOT", +} + +export enum RetentionPeriodUnit { + DAYS = "DAYS", +} + +/** + *

                                  Information about the retention period for which a retention rule is to retain resources.

                                  + */ +export interface RetentionPeriod { + /** + *

The period value for which the retention rule is to retain resources. The period is measured using the unit specified for RetentionPeriodUnit.

                                  + */ + RetentionPeriodValue: number | undefined; + + /** + *

The unit of time in which the retention period is measured. Currently, only DAYS is supported.

                                  + */ + RetentionPeriodUnit: RetentionPeriodUnit | string | undefined; +} + +export namespace RetentionPeriod { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RetentionPeriod): any => ({ + ...obj, + }); +} + +/** + *

                                  Information about the tags assigned to a Recycle Bin retention rule.

                                  + */ +export interface Tag { + /** + *

                                  The tag key.

                                  + */ + Key: string | undefined; + + /** + *

                                  The tag value.

                                  + */ + Value: string | undefined; +} + +export namespace Tag { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Tag): any => ({ + ...obj, + }); +} + +export interface CreateRuleRequest { + /** + *

                                  Information about the retention period for which the retention rule is to retain resources.

                                  + */ + RetentionPeriod: RetentionPeriod | undefined; + + /** + *

                                  A brief description for the retention rule.

                                  + */ + Description?: string; + + /** + *

                                  Information about the tags to assign to the retention rule.

                                  + */ + Tags?: Tag[]; + + /** + *

The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots are supported.

                                  + */ + ResourceType: ResourceType | string | undefined; + + /** + *

Information about the resource tags to use to identify resources that are to be retained by the retention rule. The retention rule retains only deleted snapshots that have one or more of the specified tag key and value pairs. If a snapshot is deleted, but it does not have any of the specified tag key and value pairs, it is immediately deleted without being retained by the retention rule.


You can add the same tag key and value pair to a maximum of five retention rules.
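To make the tag-matching behavior above concrete, here is a sketch of a CreateRuleRequest that retains deleted snapshots tagged env=prod for seven days; CreateRuleCommand itself is added earlier in this change, and all values shown are placeholders.

```typescript
import { CreateRuleCommand, RbinClient } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" }); // placeholder region

const response = await client.send(
  new CreateRuleCommand({
    ResourceType: "EBS_SNAPSHOT", // currently the only supported resource type
    RetentionPeriod: { RetentionPeriodValue: 7, RetentionPeriodUnit: "DAYS" },
    // Only deleted snapshots carrying at least one of these tag pairs are retained.
    ResourceTags: [{ ResourceTagKey: "env", ResourceTagValue: "prod" }],
    Description: "Retain prod snapshots for 7 days", // placeholder description
  })
);
console.log(response.Identifier, response.Status);
```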

                                  + */ + ResourceTags?: ResourceTag[]; +} + +export namespace CreateRuleRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateRuleRequest): any => ({ + ...obj, + }); +} + +export enum RuleStatus { + AVAILABLE = "available", + PENDING = "pending", +} + +export interface CreateRuleResponse { + /** + *

                                  The unique identifier of the retention rule.

                                  + */ + Identifier?: string; + + /** + *

                                  Information about the retention period for which a retention rule is to retain resources.

                                  + */ + RetentionPeriod?: RetentionPeriod; + + /** + *

                                  The retention rule description.

                                  + */ + Description?: string; + + /** + *

                                  The tags assigned to the retention rule.

                                  + */ + Tags?: Tag[]; + + /** + *

                                  The resource type retained by the retention rule.

                                  + */ + ResourceType?: ResourceType | string; + + /** + *

Information about the resource tags used to identify resources that are retained by the retention rule.

                                  + */ + ResourceTags?: ResourceTag[]; + + /** + *

                                  The state of the retention rule. Only retention rules that are in the available state retain snapshots.

                                  + */ + Status?: RuleStatus | string; +} + +export namespace CreateRuleResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateRuleResponse): any => ({ + ...obj, + }); +} + +/** + *

                                  The service could not respond to the request due to an internal problem.

                                  + */ +export interface InternalServerException extends __SmithyException, $MetadataBearer { + name: "InternalServerException"; + $fault: "server"; + Message?: string; +} + +export namespace InternalServerException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InternalServerException): any => ({ + ...obj, + }); +} + +export enum ServiceQuotaExceededExceptionReason { + SERVICE_QUOTA_EXCEEDED = "SERVICE_QUOTA_EXCEEDED", +} + +/** + *

                                  The request would cause a service quota for the number of tags per resource to be exceeded.

                                  + */ +export interface ServiceQuotaExceededException extends __SmithyException, $MetadataBearer { + name: "ServiceQuotaExceededException"; + $fault: "client"; + Message?: string; + /** + *

                                  The reason for the exception.

                                  + */ + Reason?: ServiceQuotaExceededExceptionReason | string; +} + +export namespace ServiceQuotaExceededException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServiceQuotaExceededException): any => ({ + ...obj, + }); +} + +export enum ValidationExceptionReason { + INVALID_PAGE_TOKEN = "INVALID_PAGE_TOKEN", + INVALID_PARAMETER_VALUE = "INVALID_PARAMETER_VALUE", +} + +/** + *

                                  One or more of the parameters in the request is not valid.

                                  + */ +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; + $fault: "client"; + Message?: string; + /** + *

                                  The reason for the exception.

                                  + */ + Reason?: ValidationExceptionReason | string; +} + +export namespace ValidationException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); +} + +export interface DeleteRuleRequest { + /** + *

                                  The unique ID of the retention rule to delete.

                                  + */ + Identifier: string | undefined; +} + +export namespace DeleteRuleRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteRuleRequest): any => ({ + ...obj, + }); +} + +export interface DeleteRuleResponse {} + +export namespace DeleteRuleResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteRuleResponse): any => ({ + ...obj, + }); +} + +export enum ResourceNotFoundExceptionReason { + RULE_NOT_FOUND = "RULE_NOT_FOUND", +} + +/** + *

                                  The specified resource was not found.
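Because the generated deserializers surface modeled errors with name set to the error code, callers can branch on error.name; a minimal sketch with placeholder values:

```typescript
import { GetRuleCommand, RbinClient } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" }); // placeholder region

try {
  await client.send(new GetRuleCommand({ Identifier: "EXAMPLE_RULE_ID" })); // placeholder
} catch (error: any) {
  // The protocol layer maps com.amazonaws.rbin#ResourceNotFoundException to name "ResourceNotFoundException".
  if (error?.name === "ResourceNotFoundException") {
    console.warn(`Rule not found (reason: ${error.Reason})`);
  } else {
    throw error;
  }
}
```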

                                  + */ +export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { + name: "ResourceNotFoundException"; + $fault: "client"; + Message?: string; + /** + *

                                  The reason for the exception.

                                  + */ + Reason?: ResourceNotFoundExceptionReason | string; +} + +export namespace ResourceNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ + ...obj, + }); +} + +export interface GetRuleRequest { + /** + *

                                  The unique ID of the retention rule.

                                  + */ + Identifier: string | undefined; +} + +export namespace GetRuleRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetRuleRequest): any => ({ + ...obj, + }); +} + +export interface GetRuleResponse { + /** + *

                                  The unique ID of the retention rule.

                                  + */ + Identifier?: string; + + /** + *

                                  The description assigned to the retention rule.

                                  + */ + Description?: string; + + /** + *

                                  The resource type retained by the retention rule. Currently, only Amazon EBS snapshots are supported.

                                  + */ + ResourceType?: ResourceType | string; + + /** + *

                                  Information about the period for which the retention rule retains resources.

                                  + */ + RetentionPeriod?: RetentionPeriod; + + /** + *

                                  The resource tags used to identify resources that are to be retained by the retention rule.

                                  + */ + ResourceTags?: ResourceTag[]; + + /** + *

                                  The state of the retention rule. Only retention rules that are in the available state retain snapshots.

                                  + */ + Status?: RuleStatus | string; +} + +export namespace GetRuleResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetRuleResponse): any => ({ + ...obj, + }); +} + +export interface ListRulesRequest { + /** + *

                                  The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

                                  + */ + MaxResults?: number; + + /** + *

                                  The token to use to retrieve the next page of results.

                                  + */ + NextToken?: string; + + /** + *

The resource type retained by the retention rule. Only retention rules that retain the specified resource type are listed.

                                  + */ + ResourceType: ResourceType | string | undefined; + + /** + *

                                  The tags used to identify resources that are to be retained by the retention rule.

                                  + */ + ResourceTags?: ResourceTag[]; +} + +export namespace ListRulesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListRulesRequest): any => ({ + ...obj, + }); +} + +/** + *

                                  Information about a Recycle Bin retention rule.

                                  + */ +export interface RuleSummary { + /** + *

                                  The unique ID of the retention rule.

                                  + */ + Identifier?: string; + + /** + *

                                  The description for the retention rule.

                                  + */ + Description?: string; + + /** + *

Information about the retention period for which the retention rule retains resources.

                                  + */ + RetentionPeriod?: RetentionPeriod; +} + +export namespace RuleSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: RuleSummary): any => ({ + ...obj, + }); +} + +export interface ListRulesResponse { + /** + *

                                  Information about the retention rules.

                                  + */ + Rules?: RuleSummary[]; + + /** + *

                                  The token to use to retrieve the next page of results. This value is null when there are no more results to return.

                                  + */ + NextToken?: string; +} + +export namespace ListRulesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListRulesResponse): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceRequest { + /** + *

                                  The Amazon Resource Name (ARN) of the resource for which to list the tags.

                                  + */ + ResourceArn: string | undefined; +} + +export namespace ListTagsForResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceRequest): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceResponse { + /** + *

                                  Information about the tags assigned to the resource.

                                  + */ + Tags?: Tag[]; +} + +export namespace ListTagsForResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceResponse): any => ({ + ...obj, + }); +} + +export interface TagResourceRequest { + /** + *

                                  The Amazon Resource Name (ARN) of the resource to which to assign the tags.

                                  + */ + ResourceArn: string | undefined; + + /** + *

                                  Information about the tags to assign to the resource.

                                  + */ + Tags: Tag[] | undefined; +} + +export namespace TagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ + ...obj, + }); +} + +export interface TagResourceResponse {} + +export namespace TagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UntagResourceRequest { + /** + *

                                  The Amazon Resource Name (ARN) of the resource from which to unassign the tags.

                                  + */ + ResourceArn: string | undefined; + + /** + *

                                  Information about the tags to unassign from the resource.

                                  + */ + TagKeys: string[] | undefined; +} + +export namespace UntagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ + ...obj, + }); +} + +export interface UntagResourceResponse {} + +export namespace UntagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ + ...obj, + }); +} + +export interface UpdateRuleRequest { + /** + *

                                  The unique ID of the retention rule to update.

                                  + */ + Identifier: string | undefined; + + /** + *

                                  Information about the retention period for which the retention rule is to retain resources.

                                  + */ + RetentionPeriod?: RetentionPeriod; + + /** + *

                                  The retention rule description.

                                  + */ + Description?: string; + + /** + *

                                  The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots are supported.

                                  + */ + ResourceType?: ResourceType | string; + + /** + *

Information about the resource tags to use to identify resources that are to be retained by the retention rule. The retention rule retains only deleted snapshots that have one or more of the specified tag key and value pairs. If a snapshot is deleted, but it does not have any of the specified tag key and value pairs, it is immediately deleted without being retained by the retention rule.


You can add the same tag key and value pair to a maximum of five retention rules.

                                  + */ + ResourceTags?: ResourceTag[]; +} + +export namespace UpdateRuleRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateRuleRequest): any => ({ + ...obj, + }); +} + +export interface UpdateRuleResponse { + /** + *

                                  The unique ID of the retention rule.

                                  + */ + Identifier?: string; + + /** + *

                                  Information about the retention period for which a retention rule is to retain resources.

                                  + */ + RetentionPeriod?: RetentionPeriod; + + /** + *

                                  The retention rule description.

                                  + */ + Description?: string; + + /** + *

                                  The resource type retained by the retention rule.

                                  + */ + ResourceType?: ResourceType | string; + + /** + *

Information about the resource tags used to identify resources that are retained by the retention rule.

                                  + */ + ResourceTags?: ResourceTag[]; + + /** + *

                                  The state of the retention rule. Only retention rules that are in the available state retain snapshots.

                                  + */ + Status?: RuleStatus | string; +} + +export namespace UpdateRuleResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateRuleResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-rbin/src/pagination/Interfaces.ts b/clients/client-rbin/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..4e8865f558cf --- /dev/null +++ b/clients/client-rbin/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { Rbin } from "../Rbin"; +import { RbinClient } from "../RbinClient"; + +export interface RbinPaginationConfiguration extends PaginationConfiguration { + client: Rbin | RbinClient; +} diff --git a/clients/client-rbin/src/pagination/ListRulesPaginator.ts b/clients/client-rbin/src/pagination/ListRulesPaginator.ts new file mode 100644 index 000000000000..f60c81670454 --- /dev/null +++ b/clients/client-rbin/src/pagination/ListRulesPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { ListRulesCommand, ListRulesCommandInput, ListRulesCommandOutput } from "../commands/ListRulesCommand"; +import { Rbin } from "../Rbin"; +import { RbinClient } from "../RbinClient"; +import { RbinPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: RbinClient, + input: ListRulesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListRulesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: Rbin, + input: ListRulesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listRules(input, ...args); +}; +export async function* paginateListRules( + config: RbinPaginationConfiguration, + input: ListRulesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListRulesCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof Rbin) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof RbinClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected Rbin | RbinClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-rbin/src/pagination/index.ts b/clients/client-rbin/src/pagination/index.ts new file mode 100644 index 000000000000..44e8f0f768d1 --- /dev/null +++ b/clients/client-rbin/src/pagination/index.ts @@ -0,0 +1,2 @@ +export * from "./Interfaces"; +export * from "./ListRulesPaginator"; diff --git a/clients/client-rbin/src/protocols/Aws_restJson1.ts b/clients/client-rbin/src/protocols/Aws_restJson1.ts new file mode 100644 index 000000000000..9dc6dddfd4cc --- /dev/null +++ b/clients/client-rbin/src/protocols/Aws_restJson1.ts @@ -0,0 +1,1171 @@ +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { + expectInt32 as __expectInt32, + expectNonNull as __expectNonNull, + expectObject as __expectObject, + expectString as __expectString, + extendedEncodeURIComponent as __extendedEncodeURIComponent, +} from 
"@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + MetadataBearer as __MetadataBearer, + ResponseMetadata as __ResponseMetadata, + SerdeContext as __SerdeContext, + SmithyException as __SmithyException, +} from "@aws-sdk/types"; + +import { CreateRuleCommandInput, CreateRuleCommandOutput } from "../commands/CreateRuleCommand"; +import { DeleteRuleCommandInput, DeleteRuleCommandOutput } from "../commands/DeleteRuleCommand"; +import { GetRuleCommandInput, GetRuleCommandOutput } from "../commands/GetRuleCommand"; +import { ListRulesCommandInput, ListRulesCommandOutput } from "../commands/ListRulesCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { UpdateRuleCommandInput, UpdateRuleCommandOutput } from "../commands/UpdateRuleCommand"; +import { + InternalServerException, + ResourceNotFoundException, + ResourceTag, + RetentionPeriod, + RuleSummary, + ServiceQuotaExceededException, + Tag, + ValidationException, +} from "../models/models_0"; + +export const serializeAws_restJson1CreateRuleCommand = async ( + input: CreateRuleCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/rules"; + let body: any; + body = JSON.stringify({ + ...(input.Description !== undefined && input.Description !== null && { Description: input.Description }), + ...(input.ResourceTags !== undefined && + input.ResourceTags !== null && { ResourceTags: serializeAws_restJson1ResourceTags(input.ResourceTags, context) }), + ...(input.ResourceType !== undefined && input.ResourceType !== null && { ResourceType: input.ResourceType }), + ...(input.RetentionPeriod !== undefined && + input.RetentionPeriod !== null && { + RetentionPeriod: serializeAws_restJson1RetentionPeriod(input.RetentionPeriod, context), + }), + ...(input.Tags !== undefined && + input.Tags !== null && { Tags: serializeAws_restJson1TagList(input.Tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteRuleCommand = async ( + input: DeleteRuleCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/rules/{Identifier}"; + if (input.Identifier !== undefined) { + const labelValue: string = input.Identifier; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Identifier."); + } + resolvedPath = resolvedPath.replace("{Identifier}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Identifier."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetRuleCommand = async ( + input: GetRuleCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/rules/{Identifier}"; + if (input.Identifier !== undefined) { + const labelValue: string = input.Identifier; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Identifier."); + } + resolvedPath = resolvedPath.replace("{Identifier}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Identifier."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListRulesCommand = async ( + input: ListRulesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/list-rules"; + let body: any; + body = JSON.stringify({ + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.ResourceTags !== undefined && + input.ResourceTags !== null && { ResourceTags: serializeAws_restJson1ResourceTags(input.ResourceTags, context) }), + ...(input.ResourceType !== undefined && input.ResourceType !== null && { ResourceType: input.ResourceType }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListTagsForResourceCommand = async ( + input: ListTagsForResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/tags/{ResourceArn}"; + if (input.ResourceArn !== undefined) { + const labelValue: string = input.ResourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ResourceArn."); + } + resolvedPath = resolvedPath.replace("{ResourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ResourceArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1TagResourceCommand = async ( + input: TagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags/{ResourceArn}"; + if (input.ResourceArn !== undefined) { + const labelValue: string = input.ResourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ResourceArn."); + } + resolvedPath = resolvedPath.replace("{ResourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ResourceArn."); + } + let body: any; + body = JSON.stringify({ + ...(input.Tags !== undefined && + input.Tags !== null && { Tags: serializeAws_restJson1TagList(input.Tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UntagResourceCommand = async ( + input: UntagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags/{ResourceArn}"; + if (input.ResourceArn !== undefined) { + const labelValue: string = input.ResourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ResourceArn."); + } + resolvedPath = resolvedPath.replace("{ResourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ResourceArn."); + } + const query: any = { + ...(input.TagKeys !== undefined && { tagKeys: (input.TagKeys || []).map((_entry) => _entry as any) }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1UpdateRuleCommand = async ( + input: UpdateRuleCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/rules/{Identifier}"; + if (input.Identifier !== undefined) { + const labelValue: string = input.Identifier; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Identifier."); + } + resolvedPath = resolvedPath.replace("{Identifier}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Identifier."); + } + let body: any; + body = JSON.stringify({ + ...(input.Description !== undefined && input.Description !== null && { Description: input.Description }), + ...(input.ResourceTags !== undefined && + input.ResourceTags !== null && { ResourceTags: serializeAws_restJson1ResourceTags(input.ResourceTags, context) }), + ...(input.ResourceType !== undefined && input.ResourceType !== null && { ResourceType: input.ResourceType }), + ...(input.RetentionPeriod !== undefined && + input.RetentionPeriod !== null && { + RetentionPeriod: serializeAws_restJson1RetentionPeriod(input.RetentionPeriod, context), + }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const deserializeAws_restJson1CreateRuleCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 201 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateRuleCommandError(output, context); + } + const contents: CreateRuleCommandOutput = { + $metadata: deserializeMetadata(output), + Description: undefined, + Identifier: undefined, + ResourceTags: undefined, + ResourceType: undefined, + RetentionPeriod: undefined, + Status: undefined, + Tags: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Description !== undefined && data.Description !== null) { + contents.Description = __expectString(data.Description); + } + if (data.Identifier !== undefined && data.Identifier !== null) { + contents.Identifier = __expectString(data.Identifier); + } + if (data.ResourceTags !== undefined && data.ResourceTags !== null) { + contents.ResourceTags = deserializeAws_restJson1ResourceTags(data.ResourceTags, context); + } + if (data.ResourceType !== undefined && data.ResourceType !== null) { + contents.ResourceType = __expectString(data.ResourceType); + } + if (data.RetentionPeriod !== undefined && data.RetentionPeriod !== null) { + contents.RetentionPeriod = deserializeAws_restJson1RetentionPeriod(data.RetentionPeriod, context); + } + if (data.Status !== undefined && data.Status !== null) { + contents.Status = __expectString(data.Status); + } + if (data.Tags !== undefined && data.Tags !== null) { + contents.Tags = deserializeAws_restJson1TagList(data.Tags, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateRuleCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rbin#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.rbin#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rbin#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteRuleCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 204 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteRuleCommandError(output, context); + } + const contents: DeleteRuleCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteRuleCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rbin#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rbin#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rbin#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetRuleCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return 
deserializeAws_restJson1GetRuleCommandError(output, context); + } + const contents: GetRuleCommandOutput = { + $metadata: deserializeMetadata(output), + Description: undefined, + Identifier: undefined, + ResourceTags: undefined, + ResourceType: undefined, + RetentionPeriod: undefined, + Status: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Description !== undefined && data.Description !== null) { + contents.Description = __expectString(data.Description); + } + if (data.Identifier !== undefined && data.Identifier !== null) { + contents.Identifier = __expectString(data.Identifier); + } + if (data.ResourceTags !== undefined && data.ResourceTags !== null) { + contents.ResourceTags = deserializeAws_restJson1ResourceTags(data.ResourceTags, context); + } + if (data.ResourceType !== undefined && data.ResourceType !== null) { + contents.ResourceType = __expectString(data.ResourceType); + } + if (data.RetentionPeriod !== undefined && data.RetentionPeriod !== null) { + contents.RetentionPeriod = deserializeAws_restJson1RetentionPeriod(data.RetentionPeriod, context); + } + if (data.Status !== undefined && data.Status !== null) { + contents.Status = __expectString(data.Status); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetRuleCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rbin#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rbin#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rbin#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListRulesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListRulesCommandError(output, context); + } + const contents: ListRulesCommandOutput = { + $metadata: deserializeMetadata(output), + NextToken: undefined, + Rules: undefined, + }; + const data: { [key: string]: any } = 
__expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + if (data.Rules !== undefined && data.Rules !== null) { + contents.Rules = deserializeAws_restJson1RuleSummaryList(data.Rules, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListRulesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rbin#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rbin#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTagsForResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTagsForResourceCommandError(output, context); + } + const contents: ListTagsForResourceCommandOutput = { + $metadata: deserializeMetadata(output), + Tags: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Tags !== undefined && data.Tags !== null) { + contents.Tags = deserializeAws_restJson1TagList(data.Tags, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTagsForResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rbin#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rbin#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case 
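The ListRules deserializer above returns Rules plus an optional NextToken, which is the usual cursor-style pagination contract. A hedged sketch of draining every page by following NextToken manually, assuming the generated ListRulesCommand and RuleSummary exports and that ResourceType is the required filter:

import { ListRulesCommand, RbinClient, RuleSummary } from "@aws-sdk/client-rbin";

async function listAllRules(client: RbinClient): Promise<RuleSummary[]> {
  const rules: RuleSummary[] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new ListRulesCommand({ ResourceType: "EBS_SNAPSHOT", NextToken: nextToken })
    );
    rules.push(...(page.Rules ?? []));
    nextToken = page.NextToken;
  } while (nextToken !== undefined);
  return rules;
}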
"ValidationException": + case "com.amazonaws.rbin#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1TagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 201 && output.statusCode >= 300) { + return deserializeAws_restJson1TagResourceCommandError(output, context); + } + const contents: TagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1TagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rbin#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rbin#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.rbin#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rbin#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UntagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 204 && output.statusCode >= 300) { + return deserializeAws_restJson1UntagResourceCommandError(output, context); + } + const 
contents: UntagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UntagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rbin#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rbin#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rbin#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateRuleCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateRuleCommandError(output, context); + } + const contents: UpdateRuleCommandOutput = { + $metadata: deserializeMetadata(output), + Description: undefined, + Identifier: undefined, + ResourceTags: undefined, + ResourceType: undefined, + RetentionPeriod: undefined, + Status: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Description !== undefined && data.Description !== null) { + contents.Description = __expectString(data.Description); + } + if (data.Identifier !== undefined && data.Identifier !== null) { + contents.Identifier = __expectString(data.Identifier); + } + if (data.ResourceTags !== undefined && data.ResourceTags !== null) { + contents.ResourceTags = deserializeAws_restJson1ResourceTags(data.ResourceTags, context); + } + if (data.ResourceType !== undefined && data.ResourceType !== null) { + contents.ResourceType = __expectString(data.ResourceType); + } + if (data.RetentionPeriod !== undefined && data.RetentionPeriod !== null) { + contents.RetentionPeriod = deserializeAws_restJson1RetentionPeriod(data.RetentionPeriod, context); + } + if (data.Status !== undefined && data.Status !== null) { + contents.Status = __expectString(data.Status); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateRuleCommandError = async ( + output: 
__HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rbin#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rbin#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rbin#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_restJson1InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: InternalServerException = { + name: "InternalServerException", + $fault: "server", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + return contents; +}; + +const deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ResourceNotFoundException = { + name: "ResourceNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + Reason: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + if (data.Reason !== undefined && data.Reason !== null) { + contents.Reason = __expectString(data.Reason); + } + return contents; +}; + +const deserializeAws_restJson1ServiceQuotaExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ServiceQuotaExceededException = { + name: "ServiceQuotaExceededException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + Reason: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + if (data.Reason !== undefined && data.Reason !== null) { + contents.Reason = __expectString(data.Reason); + } + return contents; +}; + +const deserializeAws_restJson1ValidationExceptionResponse = async ( + parsedOutput: any, + context: 
__SerdeContext +): Promise => { + const contents: ValidationException = { + name: "ValidationException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + Message: undefined, + Reason: undefined, + }; + const data: any = parsedOutput.body; + if (data.Message !== undefined && data.Message !== null) { + contents.Message = __expectString(data.Message); + } + if (data.Reason !== undefined && data.Reason !== null) { + contents.Reason = __expectString(data.Reason); + } + return contents; +}; + +const serializeAws_restJson1ResourceTag = (input: ResourceTag, context: __SerdeContext): any => { + return { + ...(input.ResourceTagKey !== undefined && + input.ResourceTagKey !== null && { ResourceTagKey: input.ResourceTagKey }), + ...(input.ResourceTagValue !== undefined && + input.ResourceTagValue !== null && { ResourceTagValue: input.ResourceTagValue }), + }; +}; + +const serializeAws_restJson1ResourceTags = (input: ResourceTag[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1ResourceTag(entry, context); + }); +}; + +const serializeAws_restJson1RetentionPeriod = (input: RetentionPeriod, context: __SerdeContext): any => { + return { + ...(input.RetentionPeriodUnit !== undefined && + input.RetentionPeriodUnit !== null && { RetentionPeriodUnit: input.RetentionPeriodUnit }), + ...(input.RetentionPeriodValue !== undefined && + input.RetentionPeriodValue !== null && { RetentionPeriodValue: input.RetentionPeriodValue }), + }; +}; + +const serializeAws_restJson1Tag = (input: Tag, context: __SerdeContext): any => { + return { + ...(input.Key !== undefined && input.Key !== null && { Key: input.Key }), + ...(input.Value !== undefined && input.Value !== null && { Value: input.Value }), + }; +}; + +const serializeAws_restJson1TagList = (input: Tag[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1Tag(entry, context); + }); +}; + +const deserializeAws_restJson1ResourceTag = (output: any, context: __SerdeContext): ResourceTag => { + return { + ResourceTagKey: __expectString(output.ResourceTagKey), + ResourceTagValue: __expectString(output.ResourceTagValue), + } as any; +}; + +const deserializeAws_restJson1ResourceTags = (output: any, context: __SerdeContext): ResourceTag[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ResourceTag(entry, context); + }); +}; + +const deserializeAws_restJson1RetentionPeriod = (output: any, context: __SerdeContext): RetentionPeriod => { + return { + RetentionPeriodUnit: __expectString(output.RetentionPeriodUnit), + RetentionPeriodValue: __expectInt32(output.RetentionPeriodValue), + } as any; +}; + +const deserializeAws_restJson1RuleSummary = (output: any, context: __SerdeContext): RuleSummary => { + return { + Description: __expectString(output.Description), + Identifier: __expectString(output.Identifier), + RetentionPeriod: + output.RetentionPeriod !== undefined && output.RetentionPeriod !== null + ? 
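The collection helpers above (ResourceTags, TagList, RuleSummaryList) all follow the same shape: drop nullish entries, then delegate each remaining entry to a per-member serializer or deserializer. A generic sketch of that shape, purely illustrative and not part of the generated file:

// Generic form of the list serde helpers: filter out nullish entries,
// then convert each remaining entry with the supplied per-member function.
const convertList = <In, Out>(
  entries: (In | null | undefined)[] | undefined,
  convert: (entry: In) => Out
): Out[] =>
  (entries ?? [])
    .filter((entry): entry is In => entry !== null && entry !== undefined)
    .map((entry) => convert(entry));

// e.g. convertList(output.Tags, (t) => ({ Key: t.Key, Value: t.Value }))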
deserializeAws_restJson1RetentionPeriod(output.RetentionPeriod, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1RuleSummaryList = (output: any, context: __SerdeContext): RuleSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1RuleSummary(entry, context); + }); +}; + +const deserializeAws_restJson1Tag = (output: any, context: __SerdeContext): Tag => { + return { + Key: __expectString(output.Key), + Value: __expectString(output.Value), + } as any; +}; + +const deserializeAws_restJson1TagList = (output: any, context: __SerdeContext): Tag[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1Tag(entry, context); + }); +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. +const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. +const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const isSerializableHeaderValue = (value: any): boolean => + value !== undefined && + value !== null && + value !== "" && + (!Object.getOwnPropertyNames(value).includes("length") || value.length != 0) && + (!Object.getOwnPropertyNames(value).includes("size") || value.size != 0); + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. 
+ */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-rbin/src/runtimeConfig.browser.ts b/clients/client-rbin/src/runtimeConfig.browser.ts new file mode 100644 index 000000000000..6421e8080eb2 --- /dev/null +++ b/clients/client-rbin/src/runtimeConfig.browser.ts @@ -0,0 +1,44 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { Sha256 } from "@aws-crypto/sha256-browser"; +import { DEFAULT_USE_DUALSTACK_ENDPOINT, DEFAULT_USE_FIPS_ENDPOINT } from "@aws-sdk/config-resolver"; +import { FetchHttpHandler, streamCollector } from "@aws-sdk/fetch-http-handler"; +import { invalidProvider } from "@aws-sdk/invalid-dependency"; +import { DEFAULT_MAX_ATTEMPTS, DEFAULT_RETRY_MODE } from "@aws-sdk/middleware-retry"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-browser"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-browser"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-browser"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-browser"; +import { RbinClientConfig } from "./RbinClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: RbinClientConfig) => { + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "browser", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? ((_: unknown) => () => Promise.reject(new Error("Credential is missing"))), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? DEFAULT_MAX_ATTEMPTS, + region: config?.region ?? invalidProvider("Region is missing"), + requestHandler: config?.requestHandler ?? new FetchHttpHandler(), + retryMode: config?.retryMode ?? (() => Promise.resolve(DEFAULT_RETRY_MODE)), + sha256: config?.sha256 ?? Sha256, + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? (() => Promise.resolve(DEFAULT_USE_DUALSTACK_ENDPOINT)), + useFipsEndpoint: config?.useFipsEndpoint ?? (() => Promise.resolve(DEFAULT_USE_FIPS_ENDPOINT)), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
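In the browser runtime config above, the default credential provider simply rejects with "Credential is missing", while the request handler and hash default to fetch and the browser SHA-256 implementation. In practice a browser caller therefore has to supply credentials explicitly. A minimal hedged sketch; the static keys are placeholders only, and real applications would typically source credentials from Cognito or STS:

import { RbinClient } from "@aws-sdk/client-rbin";

// Browser usage sketch: the defaults above wire up FetchHttpHandler and the
// browser SHA-256, but no usable credential provider, so credentials are passed in.
const browserClient = new RbinClient({
  region: "us-west-2",
  credentials: {
    accessKeyId: "AKIDEXAMPLE",    // placeholder
    secretAccessKey: "EXAMPLEKEY", // placeholder
  },
});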
toUtf8, + }; +}; diff --git a/clients/client-rbin/src/runtimeConfig.native.ts b/clients/client-rbin/src/runtimeConfig.native.ts new file mode 100644 index 000000000000..ef9c78d5a7d1 --- /dev/null +++ b/clients/client-rbin/src/runtimeConfig.native.ts @@ -0,0 +1,17 @@ +import { Sha256 } from "@aws-crypto/sha256-js"; + +import { RbinClientConfig } from "./RbinClient"; +import { getRuntimeConfig as getBrowserRuntimeConfig } from "./runtimeConfig.browser"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: RbinClientConfig) => { + const browserDefaults = getBrowserRuntimeConfig(config); + return { + ...browserDefaults, + ...config, + runtime: "react-native", + sha256: config?.sha256 ?? Sha256, + }; +}; diff --git a/clients/client-rbin/src/runtimeConfig.shared.ts b/clients/client-rbin/src/runtimeConfig.shared.ts new file mode 100644 index 000000000000..79a2f9fbabb7 --- /dev/null +++ b/clients/client-rbin/src/runtimeConfig.shared.ts @@ -0,0 +1,17 @@ +import { Logger as __Logger } from "@aws-sdk/types"; +import { parseUrl } from "@aws-sdk/url-parser"; + +import { defaultRegionInfoProvider } from "./endpoints"; +import { RbinClientConfig } from "./RbinClient"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: RbinClientConfig) => ({ + apiVersion: "2021-06-15", + disableHostPrefix: config?.disableHostPrefix ?? false, + logger: config?.logger ?? ({} as __Logger), + regionInfoProvider: config?.regionInfoProvider ?? defaultRegionInfoProvider, + serviceId: config?.serviceId ?? "rbin", + urlParser: config?.urlParser ?? parseUrl, +}); diff --git a/clients/client-rbin/src/runtimeConfig.ts b/clients/client-rbin/src/runtimeConfig.ts new file mode 100644 index 000000000000..e07dcd3d71ef --- /dev/null +++ b/clients/client-rbin/src/runtimeConfig.ts @@ -0,0 +1,53 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { decorateDefaultCredentialProvider } from "@aws-sdk/client-sts"; +import { + NODE_REGION_CONFIG_FILE_OPTIONS, + NODE_REGION_CONFIG_OPTIONS, + NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS, + NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS, +} from "@aws-sdk/config-resolver"; +import { defaultProvider as credentialDefaultProvider } from "@aws-sdk/credential-provider-node"; +import { Hash } from "@aws-sdk/hash-node"; +import { NODE_MAX_ATTEMPT_CONFIG_OPTIONS, NODE_RETRY_MODE_CONFIG_OPTIONS } from "@aws-sdk/middleware-retry"; +import { loadConfig as loadNodeConfig } from "@aws-sdk/node-config-provider"; +import { NodeHttpHandler, streamCollector } from "@aws-sdk/node-http-handler"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-node"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-node"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-node"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-node"; +import { RbinClientConfig } from "./RbinClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { emitWarningIfUnsupportedVersion } from "@aws-sdk/smithy-client"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: RbinClientConfig) => { + emitWarningIfUnsupportedVersion(process.version); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "node", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? 
calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? decorateDefaultCredentialProvider(credentialDefaultProvider), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? loadNodeConfig(NODE_MAX_ATTEMPT_CONFIG_OPTIONS), + region: config?.region ?? loadNodeConfig(NODE_REGION_CONFIG_OPTIONS, NODE_REGION_CONFIG_FILE_OPTIONS), + requestHandler: config?.requestHandler ?? new NodeHttpHandler(), + retryMode: config?.retryMode ?? loadNodeConfig(NODE_RETRY_MODE_CONFIG_OPTIONS), + sha256: config?.sha256 ?? Hash.bind(null, "sha256"), + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? loadNodeConfig(NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS), + useFipsEndpoint: config?.useFipsEndpoint ?? loadNodeConfig(NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? toUtf8, + }; +}; diff --git a/clients/client-rbin/tsconfig.es.json b/clients/client-rbin/tsconfig.es.json new file mode 100644 index 000000000000..4c72364cd1a0 --- /dev/null +++ b/clients/client-rbin/tsconfig.es.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "es5", + "module": "esnext", + "moduleResolution": "node", + "lib": ["dom", "es5", "es2015.promise", "es2015.collection", "es2015.iterable", "es2015.symbol.wellknown"], + "outDir": "dist-es" + } +} diff --git a/clients/client-rbin/tsconfig.json b/clients/client-rbin/tsconfig.json new file mode 100644 index 000000000000..093039289c53 --- /dev/null +++ b/clients/client-rbin/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "rootDir": "./src", + "alwaysStrict": true, + "target": "ES2018", + "module": "commonjs", + "strict": true, + "downlevelIteration": true, + "importHelpers": true, + "noEmitHelpers": true, + "incremental": true, + "resolveJsonModule": true, + "esModuleInterop": true, + "outDir": "dist-cjs", + "removeComments": true + }, + "typedocOptions": { + "exclude": ["**/node_modules/**", "**/*.spec.ts", "**/protocols/*.ts", "**/e2e/*.ts", "**/endpoints.ts"], + "excludeNotExported": true, + "excludePrivate": true, + "hideGenerator": true, + "ignoreCompilerErrors": true, + "includeDeclarations": true, + "stripInternal": true, + "readme": "README.md", + "mode": "file", + "out": "docs", + "theme": "minimal", + "plugin": ["@aws-sdk/service-client-documentation-generator"] + }, + "exclude": ["test/**/*"] +} diff --git a/clients/client-rbin/tsconfig.types.json b/clients/client-rbin/tsconfig.types.json new file mode 100644 index 000000000000..4c3dfa7b3d25 --- /dev/null +++ b/clients/client-rbin/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "removeComments": false, + "declaration": true, + "declarationDir": "dist-types", + "emitDeclarationOnly": true + }, + "exclude": ["test/**/*", "dist-types/**/*"] +} diff --git a/clients/client-redshift-data/src/RedshiftData.ts b/clients/client-redshift-data/src/RedshiftData.ts index c7dc3f4411fe..0a2187513ddd 100644 --- a/clients/client-redshift-data/src/RedshiftData.ts +++ b/clients/client-redshift-data/src/RedshiftData.ts @@ -59,13 +59,13 @@ export class RedshiftData extends RedshiftDataClient { * method, use one of the following combinations of request parameters:
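The Node.js runtime config above resolves unset options through loadNodeConfig (environment variables and the shared AWS config and credentials files), and any value supplied to the constructor wins because each default is computed with the config?.x ?? fallback pattern. A hedged sketch of overriding a couple of those defaults at construction time:

import { RbinClient } from "@aws-sdk/client-rbin";

// Node usage sketch: region and retry settings passed here take precedence over
// the environment/shared-config resolution wired up in runtimeConfig.ts above.
const client = new RbinClient({
  region: "eu-west-1",
  maxAttempts: 5,
});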

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  */ @@ -172,13 +172,13 @@ export class RedshiftData extends RedshiftDataClient { * following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the database - * user name. Permission to call the redshift:GetClusterCredentials operation is - * required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  */ @@ -218,13 +218,13 @@ export class RedshiftData extends RedshiftDataClient { * method, use one of the following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  */ @@ -297,13 +297,13 @@ export class RedshiftData extends RedshiftDataClient { * following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  */ @@ -343,13 +343,13 @@ export class RedshiftData extends RedshiftDataClient { * following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  */ @@ -417,13 +417,13 @@ export class RedshiftData extends RedshiftDataClient { * following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  */ diff --git a/clients/client-redshift-data/src/commands/BatchExecuteStatementCommand.ts b/clients/client-redshift-data/src/commands/BatchExecuteStatementCommand.ts index 3b38d4788b3d..122dc26f8c99 100644 --- a/clients/client-redshift-data/src/commands/BatchExecuteStatementCommand.ts +++ b/clients/client-redshift-data/src/commands/BatchExecuteStatementCommand.ts @@ -28,13 +28,13 @@ export interface BatchExecuteStatementCommandOutput extends BatchExecuteStatemen * method, use one of the following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  * @example diff --git a/clients/client-redshift-data/src/commands/DescribeTableCommand.ts b/clients/client-redshift-data/src/commands/DescribeTableCommand.ts index 8f8e2c68aa7d..c19def1d0f92 100644 --- a/clients/client-redshift-data/src/commands/DescribeTableCommand.ts +++ b/clients/client-redshift-data/src/commands/DescribeTableCommand.ts @@ -29,13 +29,13 @@ export interface DescribeTableCommandOutput extends DescribeTableResponse, __Met * following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the database - * user name. Permission to call the redshift:GetClusterCredentials operation is - * required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  * @example diff --git a/clients/client-redshift-data/src/commands/ExecuteStatementCommand.ts b/clients/client-redshift-data/src/commands/ExecuteStatementCommand.ts index c595c453da37..748b53396eaa 100644 --- a/clients/client-redshift-data/src/commands/ExecuteStatementCommand.ts +++ b/clients/client-redshift-data/src/commands/ExecuteStatementCommand.ts @@ -28,13 +28,13 @@ export interface ExecuteStatementCommandOutput extends ExecuteStatementOutput, _ * method, use one of the following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  * @example diff --git a/clients/client-redshift-data/src/commands/ListDatabasesCommand.ts b/clients/client-redshift-data/src/commands/ListDatabasesCommand.ts index 11b6a3a29cc3..ee20a9118a9c 100644 --- a/clients/client-redshift-data/src/commands/ListDatabasesCommand.ts +++ b/clients/client-redshift-data/src/commands/ListDatabasesCommand.ts @@ -28,13 +28,13 @@ export interface ListDatabasesCommandOutput extends ListDatabasesResponse, __Met * following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  * @example diff --git a/clients/client-redshift-data/src/commands/ListSchemasCommand.ts b/clients/client-redshift-data/src/commands/ListSchemasCommand.ts index a9be230b1c81..7b28be5ad5c4 100644 --- a/clients/client-redshift-data/src/commands/ListSchemasCommand.ts +++ b/clients/client-redshift-data/src/commands/ListSchemasCommand.ts @@ -28,13 +28,13 @@ export interface ListSchemasCommandOutput extends ListSchemasResponse, __Metadat * following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
                                  * @example diff --git a/clients/client-redshift-data/src/commands/ListTablesCommand.ts b/clients/client-redshift-data/src/commands/ListTablesCommand.ts index 39a7b5942e10..918cc33ac1e9 100644 --- a/clients/client-redshift-data/src/commands/ListTablesCommand.ts +++ b/clients/client-redshift-data/src/commands/ListTablesCommand.ts @@ -29,13 +29,13 @@ export interface ListTablesCommandOutput extends ListTablesResponse, __MetadataB * following combinations of request parameters:

                                  *
                                    *
                                  • - *

                                    Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the - * cluster identifier that matches the cluster in the secret.

                                    + *

                                    Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret. + * When connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                    *
                                  • *
                                  • - *

                                    Temporary credentials - specify the cluster identifier, the database name, and the - * database user name. Permission to call the redshift:GetClusterCredentials - * operation is required to use this method.

                                    + *

                                    Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. + * Also, permission to call the redshift:GetClusterCredentials operation is required. + * When connecting to a serverless endpoint, specify the database name.

                                    *
                                  • *
 * @example
diff --git a/clients/client-redshift-data/src/models/models_0.ts b/clients/client-redshift-data/src/models/models_0.ts
index 986cfb0c7733..d3ff9a8246bd 100644
--- a/clients/client-redshift-data/src/models/models_0.ts
+++ b/clients/client-redshift-data/src/models/models_0.ts
@@ -47,9 +47,9 @@ export interface BatchExecuteStatementInput {
   Sqls: string[] | undefined;

   /**
-   * The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
+   * The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.
    */
-  ClusterIdentifier: string | undefined;
+  ClusterIdentifier?: string;

   /**
    * The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.
@@ -57,7 +57,7 @@ export interface BatchExecuteStatementInput {
   SecretArn?: string;

   /**
-   * The database user name. This parameter is required when authenticating using temporary credentials.
+   * The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
    */
   DbUser?: string;

@@ -99,7 +99,7 @@ export interface BatchExecuteStatementOutput {
   CreatedAt?: Date;

   /**
-   * The cluster identifier.
+   * The cluster identifier. This parameter is not returned when connecting to a serverless endpoint.
    */
   ClusterIdentifier?: string;

@@ -182,6 +182,24 @@ export namespace CancelStatementResponse {
   });
 }

+/**
+ * Connection to a database failed.
+ */
+export interface DatabaseConnectionException extends __SmithyException, $MetadataBearer {
+  name: "DatabaseConnectionException";
+  $fault: "server";
+  Message: string | undefined;
+}
+
+export namespace DatabaseConnectionException {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DatabaseConnectionException): any => ({
+    ...obj,
+  });
+}
+
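Editor's illustration, not part of the patch: a sketch of how calling code might react to the new exception. In this SDK version service errors surface as objects distinguished by error.name (the pattern the generated READMEs describe), so a caller can branch on the new name; the statement parameters below are placeholders.

```ts
import { ExecuteStatementCommand, RedshiftDataClient } from "@aws-sdk/client-redshift-data";

const client = new RedshiftDataClient({ region: "us-east-1" }); // placeholder region

async function runStatement() {
  try {
    return await client.send(
      new ExecuteStatementCommand({
        Database: "dev", // placeholder
        SecretArn: "arn:aws:secretsmanager:us-east-1:123456789012:secret:example", // placeholder
        Sql: "SELECT 1",
      })
    );
  } catch (error: any) {
    if (error.name === "DatabaseConnectionException") {
      // Server-fault error added in this change: the Data API could not reach the database.
      console.error("Database connection failed:", error.Message ?? error.message);
      return undefined;
    }
    throw error; // rethrow anything not specifically handled here
  }
}
```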

 /**
  * The Amazon Redshift Data API operation failed due to invalid input.
  */
@@ -434,7 +452,7 @@ export interface SubStatementData {
   RedshiftQueryId?: number;

   /**
-   * A value that indicates whether the statement has a result set. The result set can be empty.
+   * A value that indicates whether the statement has a result set. The result set can be empty. The value is true for an empty result set.
    */
   HasResultSet?: boolean;
 }

@@ -529,7 +547,8 @@ export interface DescribeStatementResponse {
   RedshiftPid?: number;

   /**
-   * A value that indicates whether the statement has a result set. The result set can be empty. 
+   * A value that indicates whether the statement has a result set. The result set can be empty. The value is true for an empty result set.
+   * The value is true if any substatement returns a result set.
    */
   HasResultSet?: boolean;

@@ -578,9 +597,9 @@ export namespace DescribeStatementResponse {

 export interface DescribeTableRequest {
   /**
-   * The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
+   * The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.
    */
-  ClusterIdentifier: string | undefined;
+  ClusterIdentifier?: string;

   /**
    * The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.
@@ -588,7 +607,7 @@ export interface DescribeTableRequest {
   SecretArn?: string;

   /**
-   * The database user name. This parameter is required when authenticating using temporary credentials.
+   * The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
    */
   DbUser?: string;

@@ -694,9 +713,9 @@ export interface ExecuteStatementInput {
   Sql: string | undefined;

   /**
-   * The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
+   * The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.
    */
-  ClusterIdentifier: string | undefined;
+  ClusterIdentifier?: string;

   /**
    * The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.
@@ -704,7 +723,7 @@ export interface ExecuteStatementInput {
   SecretArn?: string;

   /**
-   * The database user name. This parameter is required when authenticating using temporary credentials.
+   * The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
    */
   DbUser?: string;

@@ -750,7 +769,7 @@ export interface ExecuteStatementOutput {
   CreatedAt?: Date;

   /**
-   * The cluster identifier.
+   * The cluster identifier. This parameter is not returned when connecting to a serverless endpoint.
    */
   ClusterIdentifier?: string;

@@ -973,9 +992,9 @@ export namespace GetStatementResultResponse {

 export interface ListDatabasesRequest {
   /**
-   * The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
+   * The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.
    */
-  ClusterIdentifier: string | undefined;
+  ClusterIdentifier?: string;

   /**
    * The name of the database. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
@@ -988,7 +1007,7 @@ export interface ListDatabasesRequest {
   SecretArn?: string;

   /**
-   * The database user name. This parameter is required when authenticating using temporary credentials.
+   * The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
    */
   DbUser?: string;

@@ -1036,9 +1055,9 @@ export namespace ListDatabasesResponse {

 export interface ListSchemasRequest {
   /**
-   * The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
+   * The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.
    */
-  ClusterIdentifier: string | undefined;
+  ClusterIdentifier?: string;

   /**
    * The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.
@@ -1046,7 +1065,7 @@ export interface ListSchemasRequest {
   SecretArn?: string;

   /**
-   * The database user name. This parameter is required when authenticating using temporary credentials.
+   * The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
    */
   DbUser?: string;

@@ -1263,9 +1282,9 @@ export namespace ListStatementsResponse {

 export interface ListTablesRequest {
   /**
-   * The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.
+   * The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.
    */
-  ClusterIdentifier: string | undefined;
+  ClusterIdentifier?: string;

   /**
    * The name or ARN of the secret that enables access to the database. This parameter is required when authenticating using Secrets Manager.
@@ -1273,7 +1292,7 @@ export interface ListTablesRequest {
   SecretArn?: string;

   /**
-   * The database user name. This parameter is required when authenticating using temporary credentials.
+   * The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.
                                  */ DbUser?: string; diff --git a/clients/client-redshift-data/src/protocols/Aws_json1_1.ts b/clients/client-redshift-data/src/protocols/Aws_json1_1.ts index 1ff29e7a1ad1..90532194b320 100644 --- a/clients/client-redshift-data/src/protocols/Aws_json1_1.ts +++ b/clients/client-redshift-data/src/protocols/Aws_json1_1.ts @@ -40,6 +40,7 @@ import { CancelStatementRequest, CancelStatementResponse, ColumnMetadata, + DatabaseConnectionException, DescribeStatementRequest, DescribeStatementResponse, DescribeTableRequest, @@ -296,6 +297,14 @@ const deserializeAws_json1_1CancelStatementCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "DatabaseConnectionException": + case "com.amazonaws.redshiftdata#DatabaseConnectionException": + response = { + ...(await deserializeAws_json1_1DatabaseConnectionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InternalServerException": case "com.amazonaws.redshiftdata#InternalServerException": response = { @@ -436,6 +445,14 @@ const deserializeAws_json1_1DescribeTableCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "DatabaseConnectionException": + case "com.amazonaws.redshiftdata#DatabaseConnectionException": + response = { + ...(await deserializeAws_json1_1DatabaseConnectionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InternalServerException": case "com.amazonaws.redshiftdata#InternalServerException": response = { @@ -638,6 +655,14 @@ const deserializeAws_json1_1ListDatabasesCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "DatabaseConnectionException": + case "com.amazonaws.redshiftdata#DatabaseConnectionException": + response = { + ...(await deserializeAws_json1_1DatabaseConnectionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InternalServerException": case "com.amazonaws.redshiftdata#InternalServerException": response = { @@ -700,6 +725,14 @@ const deserializeAws_json1_1ListSchemasCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "DatabaseConnectionException": + case "com.amazonaws.redshiftdata#DatabaseConnectionException": + response = { + ...(await deserializeAws_json1_1DatabaseConnectionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InternalServerException": case "com.amazonaws.redshiftdata#InternalServerException": response = { @@ -824,6 +857,14 @@ const deserializeAws_json1_1ListTablesCommandError = async ( let errorCode = "UnknownError"; errorCode = loadRestJsonErrorCode(output, parsedOutput.body); switch (errorCode) { + case "DatabaseConnectionException": + case "com.amazonaws.redshiftdata#DatabaseConnectionException": + response = { + ...(await deserializeAws_json1_1DatabaseConnectionExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; case "InternalServerException": case "com.amazonaws.redshiftdata#InternalServerException": response = { @@ -887,6 +928,21 @@ const 
deserializeAws_json1_1BatchExecuteStatementExceptionResponse = async ( return contents; }; +const deserializeAws_json1_1DatabaseConnectionExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const body = parsedOutput.body; + const deserialized: any = deserializeAws_json1_1DatabaseConnectionException(body, context); + const contents: DatabaseConnectionException = { + name: "DatabaseConnectionException", + $fault: "server", + $metadata: deserializeMetadata(parsedOutput), + ...deserialized, + }; + return contents; +}; + const deserializeAws_json1_1ExecuteStatementExceptionResponse = async ( parsedOutput: any, context: __SerdeContext @@ -1186,6 +1242,15 @@ const deserializeAws_json1_1ColumnMetadataList = (output: any, context: __SerdeC }); }; +const deserializeAws_json1_1DatabaseConnectionException = ( + output: any, + context: __SerdeContext +): DatabaseConnectionException => { + return { + Message: __expectString(output.Message), + } as any; +}; + const deserializeAws_json1_1DatabaseList = (output: any, context: __SerdeContext): string[] => { return (output || []) .filter((e: any) => e != null) diff --git a/clients/client-rum/.gitignore b/clients/client-rum/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-rum/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-rum/LICENSE b/clients/client-rum/LICENSE new file mode 100644 index 000000000000..f9e0c8672bca --- /dev/null +++ b/clients/client-rum/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/clients/client-rum/README.md b/clients/client-rum/README.md new file mode 100644 index 000000000000..87e9adfcbf17 --- /dev/null +++ b/clients/client-rum/README.md @@ -0,0 +1,211 @@ +# @aws-sdk/client-rum + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-rum/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-rum) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-rum.svg)](https://www.npmjs.com/package/@aws-sdk/client-rum) + +## Description + +AWS SDK for JavaScript RUM Client for Node.js, Browser and React Native. + +

                                  With Amazon CloudWatch RUM, you can perform real-user monitoring to collect client-side data about +your web application performance from actual user sessions in real time. The data collected includes page load +times, client-side errors, and user behavior. When you view this data, you can see it all aggregated together and +also see breakdowns by the browsers and devices that your customers use.

                                  + +

                                  You can use the collected data to quickly identify and debug client-side performance issues. CloudWatch +RUM helps you visualize anomalies in your application performance and find relevant debugging data such as error +messages, stack traces, and user sessions. You can also use RUM to +understand the range of end-user impact including the number of users, geolocations, and browsers used.

                                  + +## Installing + +To install the this package, simply type add or install @aws-sdk/client-rum +using your favorite package manager: + +- `npm install @aws-sdk/client-rum` +- `yarn add @aws-sdk/client-rum` +- `pnpm add @aws-sdk/client-rum` + +## Getting Started + +### Import + +The AWS SDK is modulized by clients and commands. +To send a request, you only need to import the `RUMClient` and +the commands you need, for example `CreateAppMonitorCommand`: + +```js +// ES5 example +const { RUMClient, CreateAppMonitorCommand } = require("@aws-sdk/client-rum"); +``` + +```ts +// ES6+ example +import { RUMClient, CreateAppMonitorCommand } from "@aws-sdk/client-rum"; +``` + +### Usage + +To send a request, you: + +- Initiate client with configuration (e.g. credentials, region). +- Initiate command with input parameters. +- Call `send` operation on client with command object as input. +- If you are using a custom http handler, you may call `destroy()` to close open connections. + +```js +// a client can be shared by different commands. +const client = new RUMClient({ region: "REGION" }); + +const params = { + /** input parameters */ +}; +const command = new CreateAppMonitorCommand(params); +``` + +#### Async/await + +We recommend using [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await) +operator to wait for the promise returned by send operation as follows: + +```js +// async/await. +try { + const data = await client.send(command); + // process data. +} catch (error) { + // error handling. +} finally { + // finally. +} +``` + +Async-await is clean, concise, intuitive, easy to debug and has better error handling +as compared to using Promise chains or callbacks. + +#### Promises + +You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining) +to execute send operation. + +```js +client.send(command).then( + (data) => { + // process data. + }, + (error) => { + // error handling. + } +); +``` + +Promises can also be called using `.catch()` and `.finally()` as follows: + +```js +client + .send(command) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }) + .finally(() => { + // finally. + }); +``` + +#### Callbacks + +We do not recommend using callbacks because of [callback hell](http://callbackhell.com/), +but they are supported by the send operation. + +```js +// callbacks. +client.send(command, (err, data) => { + // proccess err and data. +}); +``` + +#### v2 compatible style + +The client can also send requests using v2 compatible style. +However, it results in a bigger bundle size and may be dropped in next major version. More details in the blog post +on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/) + +```ts +import * as AWS from "@aws-sdk/client-rum"; +const client = new AWS.RUM({ region: "REGION" }); + +// async/await. +try { + const data = await client.createAppMonitor(params); + // process data. +} catch (error) { + // error handling. +} + +// Promises. +client + .createAppMonitor(params) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }); + +// callbacks. +client.createAppMonitor(params, (err, data) => { + // proccess err and data. +}); +``` + +### Troubleshooting + +When the service returns an exception, the error will include the exception information, +as well as response metadata (e.g. 
request id). + +```js +try { + const data = await client.send(command); + // process data. +} catch (error) { + const { requestId, cfId, extendedRequestId } = error.$metadata; + console.log({ requestId, cfId, extendedRequestId }); + /** + * The keys within exceptions are also parsed. + * You can access them by specifying exception names: + * if (error.name === 'SomeServiceException') { + * const value = error.specialKeyInException; + * } + */ +} +``` + +## Getting Help + +Please use these community resources for getting help. +We use the GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them. + +- Visit [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html) + or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html). +- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/) + on AWS Developer Blog. +- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`. +- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3). +- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose). + +To test your universal JavaScript code in Node.js, browser and react-native environments, +visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests). + +## Contributing + +This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-rum` package is updated. +To contribute to client you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0), +see LICENSE for more information. 
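Editor's addition, not generated code: a slightly more concrete version of the Usage sketch in the README above. The Name and Domain fields are assumptions based on the app monitor concepts this client describes; check CreateAppMonitorRequest in this package's models_0.ts for the authoritative input shape.

```ts
import { CreateAppMonitorCommand, RUMClient } from "@aws-sdk/client-rum";

const client = new RUMClient({ region: "us-east-1" }); // placeholder region

async function createMonitor() {
  // Field names here are illustrative assumptions, not a definitive contract.
  const command = new CreateAppMonitorCommand({
    Name: "my-web-app", // assumed: the app monitor name
    Domain: "example.com", // assumed: the domain the application is served from
  });

  const response = await client.send(command);
  console.log("Created app monitor:", response.Id); // Id is assumed to be returned
}
```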
diff --git a/clients/client-rum/jest.config.js b/clients/client-rum/jest.config.js new file mode 100644 index 000000000000..02eed352c6a8 --- /dev/null +++ b/clients/client-rum/jest.config.js @@ -0,0 +1,4 @@ +module.exports = { + preset: "ts-jest", + testMatch: ["**/*.spec.ts", "!**/*.browser.spec.ts", "!**/*.integ.spec.ts"], +}; diff --git a/clients/client-rum/package.json b/clients/client-rum/package.json new file mode 100644 index 000000000000..380674e0cd2c --- /dev/null +++ b/clients/client-rum/package.json @@ -0,0 +1,94 @@ +{ + "name": "@aws-sdk/client-rum", + "description": "AWS SDK for JavaScript Rum Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "yarn build:cjs && yarn build:es && yarn build:types", + "build:cjs": "tsc -p tsconfig.json", + "build:docs": "yarn clean:docs && typedoc ./", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "clean": "yarn clean:dist && yarn clean:docs", + "clean:dist": "rimraf ./dist-*", + "clean:docs": "rimraf ./docs", + "downlevel-dts": "downlevel-dts dist-types dist-types/ts3.4", + "test": "jest --coverage --passWithNoTests" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "3.43.0", + "@aws-sdk/config-resolver": "3.40.0", + "@aws-sdk/credential-provider-node": "3.41.0", + "@aws-sdk/fetch-http-handler": "3.40.0", + "@aws-sdk/hash-node": "3.40.0", + "@aws-sdk/invalid-dependency": "3.40.0", + "@aws-sdk/middleware-content-length": "3.40.0", + "@aws-sdk/middleware-host-header": "3.40.0", + "@aws-sdk/middleware-logger": "3.40.0", + "@aws-sdk/middleware-retry": "3.40.0", + "@aws-sdk/middleware-serde": "3.40.0", + "@aws-sdk/middleware-signing": "3.40.0", + "@aws-sdk/middleware-stack": "3.40.0", + "@aws-sdk/middleware-user-agent": "3.40.0", + "@aws-sdk/node-config-provider": "3.40.0", + "@aws-sdk/node-http-handler": "3.40.0", + "@aws-sdk/protocol-http": "3.40.0", + "@aws-sdk/smithy-client": "3.41.0", + "@aws-sdk/types": "3.40.0", + "@aws-sdk/url-parser": "3.40.0", + "@aws-sdk/util-base64-browser": "3.37.0", + "@aws-sdk/util-base64-node": "3.37.0", + "@aws-sdk/util-body-length-browser": "3.37.0", + "@aws-sdk/util-body-length-node": "3.37.0", + "@aws-sdk/util-user-agent-browser": "3.40.0", + "@aws-sdk/util-user-agent-node": "3.40.0", + "@aws-sdk/util-utf8-browser": "3.37.0", + "@aws-sdk/util-utf8-node": "3.37.0", + "tslib": "^2.3.0" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "3.38.0", + "@types/node": "^12.7.5", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-rum", + "repository": { + "type": "git", + "url": "https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-rum" + } +} diff --git 
a/clients/client-rum/src/RUM.ts b/clients/client-rum/src/RUM.ts new file mode 100644 index 000000000000..d7fe6180d29e --- /dev/null +++ b/clients/client-rum/src/RUM.ts @@ -0,0 +1,408 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { + CreateAppMonitorCommand, + CreateAppMonitorCommandInput, + CreateAppMonitorCommandOutput, +} from "./commands/CreateAppMonitorCommand"; +import { + DeleteAppMonitorCommand, + DeleteAppMonitorCommandInput, + DeleteAppMonitorCommandOutput, +} from "./commands/DeleteAppMonitorCommand"; +import { + GetAppMonitorCommand, + GetAppMonitorCommandInput, + GetAppMonitorCommandOutput, +} from "./commands/GetAppMonitorCommand"; +import { + GetAppMonitorDataCommand, + GetAppMonitorDataCommandInput, + GetAppMonitorDataCommandOutput, +} from "./commands/GetAppMonitorDataCommand"; +import { + ListAppMonitorsCommand, + ListAppMonitorsCommandInput, + ListAppMonitorsCommandOutput, +} from "./commands/ListAppMonitorsCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + PutRumEventsCommand, + PutRumEventsCommandInput, + PutRumEventsCommandOutput, +} from "./commands/PutRumEventsCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { + UpdateAppMonitorCommand, + UpdateAppMonitorCommandInput, + UpdateAppMonitorCommandOutput, +} from "./commands/UpdateAppMonitorCommand"; +import { RUMClient } from "./RUMClient"; + +/** + *

                                  With Amazon CloudWatch RUM, you can perform real-user monitoring to collect client-side data about + * your web application performance from actual user sessions in real time. The data collected includes page load + * times, client-side errors, and user behavior. When you view this data, you can see it all aggregated together and + * also see breakdowns by the browsers and devices that your customers use.

                                  + * + *

                                  You can use the collected data to quickly identify and debug client-side performance issues. CloudWatch + * RUM helps you visualize anomalies in your application performance and find relevant debugging data such as error + * messages, stack traces, and user sessions. You can also use RUM to + * understand the range of end-user impact including the number of users, geolocations, and browsers used.

                                  + */ +export class RUM extends RUMClient { + /** + *

Creates an Amazon CloudWatch RUM app monitor, which collects telemetry data from your application and sends that + * data to RUM. The data includes performance and reliability information such as page load time, client-side errors, + * and user behavior.

                                  + *

                                  You use this operation only to create a new app monitor. To update an existing app monitor, use UpdateAppMonitor instead.

                                  + *

                                  After you create an app monitor, sign in to the CloudWatch RUM console to get + * the JavaScript code snippet to add to your web application. For more information, see + * How do I find a code snippet + * that I've already generated? + *

                                  + */ + public createAppMonitor( + args: CreateAppMonitorCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createAppMonitor( + args: CreateAppMonitorCommandInput, + cb: (err: any, data?: CreateAppMonitorCommandOutput) => void + ): void; + public createAppMonitor( + args: CreateAppMonitorCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateAppMonitorCommandOutput) => void + ): void; + public createAppMonitor( + args: CreateAppMonitorCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateAppMonitorCommandOutput) => void), + cb?: (err: any, data?: CreateAppMonitorCommandOutput) => void + ): Promise | void { + const command = new CreateAppMonitorCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Deletes an existing app monitor. This immediately stops the collection of data.

                                  + */ + public deleteAppMonitor( + args: DeleteAppMonitorCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteAppMonitor( + args: DeleteAppMonitorCommandInput, + cb: (err: any, data?: DeleteAppMonitorCommandOutput) => void + ): void; + public deleteAppMonitor( + args: DeleteAppMonitorCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteAppMonitorCommandOutput) => void + ): void; + public deleteAppMonitor( + args: DeleteAppMonitorCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteAppMonitorCommandOutput) => void), + cb?: (err: any, data?: DeleteAppMonitorCommandOutput) => void + ): Promise | void { + const command = new DeleteAppMonitorCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Retrieves the complete configuration information for one app monitor.

                                  + */ + public getAppMonitor( + args: GetAppMonitorCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getAppMonitor( + args: GetAppMonitorCommandInput, + cb: (err: any, data?: GetAppMonitorCommandOutput) => void + ): void; + public getAppMonitor( + args: GetAppMonitorCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetAppMonitorCommandOutput) => void + ): void; + public getAppMonitor( + args: GetAppMonitorCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetAppMonitorCommandOutput) => void), + cb?: (err: any, data?: GetAppMonitorCommandOutput) => void + ): Promise | void { + const command = new GetAppMonitorCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Retrieves the raw performance events that RUM has collected from your web application, + * so that you can do your own processing or analysis of this data.

                                  + */ + public getAppMonitorData( + args: GetAppMonitorDataCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getAppMonitorData( + args: GetAppMonitorDataCommandInput, + cb: (err: any, data?: GetAppMonitorDataCommandOutput) => void + ): void; + public getAppMonitorData( + args: GetAppMonitorDataCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetAppMonitorDataCommandOutput) => void + ): void; + public getAppMonitorData( + args: GetAppMonitorDataCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetAppMonitorDataCommandOutput) => void), + cb?: (err: any, data?: GetAppMonitorDataCommandOutput) => void + ): Promise | void { + const command = new GetAppMonitorDataCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Returns a list of the Amazon CloudWatch RUM app monitors in the account.

                                  + */ + public listAppMonitors( + args: ListAppMonitorsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listAppMonitors( + args: ListAppMonitorsCommandInput, + cb: (err: any, data?: ListAppMonitorsCommandOutput) => void + ): void; + public listAppMonitors( + args: ListAppMonitorsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListAppMonitorsCommandOutput) => void + ): void; + public listAppMonitors( + args: ListAppMonitorsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListAppMonitorsCommandOutput) => void), + cb?: (err: any, data?: ListAppMonitorsCommandOutput) => void + ): Promise | void { + const command = new ListAppMonitorsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Displays the tags associated with a CloudWatch RUM resource.

                                  + */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTagsForResourceCommandOutput) => void), + cb?: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): Promise | void { + const command = new ListTagsForResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Sends telemetry events about your application performance and user behavior to CloudWatch RUM. The code + * snippet that RUM generates for you to add to your application includes PutRumEvents operations to + * send this data to RUM.

                                  + *

                                  Each PutRumEvents operation can send a batch of events from one user session.

                                  + */ + public putRumEvents( + args: PutRumEventsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public putRumEvents(args: PutRumEventsCommandInput, cb: (err: any, data?: PutRumEventsCommandOutput) => void): void; + public putRumEvents( + args: PutRumEventsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: PutRumEventsCommandOutput) => void + ): void; + public putRumEvents( + args: PutRumEventsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: PutRumEventsCommandOutput) => void), + cb?: (err: any, data?: PutRumEventsCommandOutput) => void + ): Promise | void { + const command = new PutRumEventsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Assigns one or more tags (key-value pairs) to the specified CloudWatch RUM resource. Currently, + * the only resources that + * can be tagged are app monitors.

                                  + *

                                  Tags can help you organize and categorize your resources. You can also use them to scope user + * permissions by granting a user + * permission to access or change only resources with certain tag values.

                                  + *

                                  Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                  + *

You can use the TagResource action with a resource that already has tags. + * If you specify a new tag key for the resource, + * this tag is appended to the list of tags associated + * with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces + * the previous value for that tag.

                                  + *

                                  You can associate as many as 50 tags with a resource.

                                  + *

                                  For more information, see Tagging Amazon Web Services resources.

                                  + */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise; + public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void; + public tagResource( + args: TagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TagResourceCommandOutput) => void + ): void; + public tagResource( + args: TagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TagResourceCommandOutput) => void), + cb?: (err: any, data?: TagResourceCommandOutput) => void + ): Promise | void { + const command = new TagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *
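Editor's illustration of the tagging behavior documented for tagResource above, not generated code. It assumes the conventional ResourceArn plus string-to-string Tags map that the text implies; the ARN and the tag keys and values are placeholders.

```ts
import { ListTagsForResourceCommand, RUMClient, TagResourceCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({ region: "us-east-1" }); // placeholder region

async function tagAppMonitor(appMonitorArn: string) {
  // Adds two tags; an existing key would have its value replaced, per the doc above.
  await client.send(
    new TagResourceCommand({
      ResourceArn: appMonitorArn,
      Tags: { Stage: "prod", Team: "frontend" }, // placeholder keys and values
    })
  );

  // Read the tags back to confirm.
  const response = await client.send(new ListTagsForResourceCommand({ ResourceArn: appMonitorArn }));
  console.log(response.Tags); // Tags is assumed to be the map in the response
}
```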

                                  Removes one or more tags from the specified resource.

                                  + */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public untagResource( + args: UntagResourceCommandInput, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UntagResourceCommandOutput) => void), + cb?: (err: any, data?: UntagResourceCommandOutput) => void + ): Promise | void { + const command = new UntagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                  Updates the configuration of an existing app monitor. When you use this operation, only the parts of the app monitor + * configuration that you specify in this operation are changed. For any parameters that you omit, the existing + * values are kept.

                                  + *

                                  You can't use this operation to change the tags of an existing app monitor. To change the tags of an existing app monitor, use + * TagResource.

                                  + *

                                  To create a new app monitor, use CreateAppMonitor.

                                  + *

                                  After you update an app monitor, sign in to the CloudWatch RUM console to get + * the updated JavaScript code snippet to add to your web application. For more information, see + * How do I find a code snippet + * that I've already generated? + *

                                  + */ + public updateAppMonitor( + args: UpdateAppMonitorCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateAppMonitor( + args: UpdateAppMonitorCommandInput, + cb: (err: any, data?: UpdateAppMonitorCommandOutput) => void + ): void; + public updateAppMonitor( + args: UpdateAppMonitorCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateAppMonitorCommandOutput) => void + ): void; + public updateAppMonitor( + args: UpdateAppMonitorCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateAppMonitorCommandOutput) => void), + cb?: (err: any, data?: UpdateAppMonitorCommandOutput) => void + ): Promise | void { + const command = new UpdateAppMonitorCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } +} diff --git a/clients/client-rum/src/RUMClient.ts b/clients/client-rum/src/RUMClient.ts new file mode 100644 index 000000000000..e7e036e9c9f7 --- /dev/null +++ b/clients/client-rum/src/RUMClient.ts @@ -0,0 +1,287 @@ +import { + EndpointsInputConfig, + EndpointsResolvedConfig, + RegionInputConfig, + RegionResolvedConfig, + resolveEndpointsConfig, + resolveRegionConfig, +} from "@aws-sdk/config-resolver"; +import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length"; +import { + getHostHeaderPlugin, + HostHeaderInputConfig, + HostHeaderResolvedConfig, + resolveHostHeaderConfig, +} from "@aws-sdk/middleware-host-header"; +import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; +import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry"; +import { + AwsAuthInputConfig, + AwsAuthResolvedConfig, + getAwsAuthPlugin, + resolveAwsAuthConfig, +} from "@aws-sdk/middleware-signing"; +import { + getUserAgentPlugin, + resolveUserAgentConfig, + UserAgentInputConfig, + UserAgentResolvedConfig, +} from "@aws-sdk/middleware-user-agent"; +import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http"; +import { + Client as __Client, + SmithyConfiguration as __SmithyConfiguration, + SmithyResolvedConfiguration as __SmithyResolvedConfiguration, +} from "@aws-sdk/smithy-client"; +import { + Credentials as __Credentials, + Decoder as __Decoder, + Encoder as __Encoder, + Hash as __Hash, + HashConstructor as __HashConstructor, + HttpHandlerOptions as __HttpHandlerOptions, + Logger as __Logger, + Provider as __Provider, + Provider, + RegionInfoProvider, + StreamCollector as __StreamCollector, + UrlParser as __UrlParser, + UserAgent as __UserAgent, +} from "@aws-sdk/types"; + +import { CreateAppMonitorCommandInput, CreateAppMonitorCommandOutput } from "./commands/CreateAppMonitorCommand"; +import { DeleteAppMonitorCommandInput, DeleteAppMonitorCommandOutput } from "./commands/DeleteAppMonitorCommand"; +import { GetAppMonitorCommandInput, GetAppMonitorCommandOutput } from "./commands/GetAppMonitorCommand"; +import { GetAppMonitorDataCommandInput, GetAppMonitorDataCommandOutput } from "./commands/GetAppMonitorDataCommand"; +import { ListAppMonitorsCommandInput, ListAppMonitorsCommandOutput } from "./commands/ListAppMonitorsCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from 
"./commands/ListTagsForResourceCommand"; +import { PutRumEventsCommandInput, PutRumEventsCommandOutput } from "./commands/PutRumEventsCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { UpdateAppMonitorCommandInput, UpdateAppMonitorCommandOutput } from "./commands/UpdateAppMonitorCommand"; +import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; + +export type ServiceInputTypes = + | CreateAppMonitorCommandInput + | DeleteAppMonitorCommandInput + | GetAppMonitorCommandInput + | GetAppMonitorDataCommandInput + | ListAppMonitorsCommandInput + | ListTagsForResourceCommandInput + | PutRumEventsCommandInput + | TagResourceCommandInput + | UntagResourceCommandInput + | UpdateAppMonitorCommandInput; + +export type ServiceOutputTypes = + | CreateAppMonitorCommandOutput + | DeleteAppMonitorCommandOutput + | GetAppMonitorCommandOutput + | GetAppMonitorDataCommandOutput + | ListAppMonitorsCommandOutput + | ListTagsForResourceCommandOutput + | PutRumEventsCommandOutput + | TagResourceCommandOutput + | UntagResourceCommandOutput + | UpdateAppMonitorCommandOutput; + +export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { + /** + * The HTTP handler to use. Fetch in browser and Https in Nodejs. + */ + requestHandler?: __HttpHandler; + + /** + * A constructor for a class implementing the {@link __Hash} interface + * that computes the SHA-256 HMAC or checksum of a string or binary buffer. + * @internal + */ + sha256?: __HashConstructor; + + /** + * The function that will be used to convert strings into HTTP endpoints. + * @internal + */ + urlParser?: __UrlParser; + + /** + * A function that can calculate the length of a request body. + * @internal + */ + bodyLengthChecker?: (body: any) => number | undefined; + + /** + * A function that converts a stream into an array of bytes. + * @internal + */ + streamCollector?: __StreamCollector; + + /** + * The function that will be used to convert a base64-encoded string to a byte array. + * @internal + */ + base64Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a base64-encoded string. + * @internal + */ + base64Encoder?: __Encoder; + + /** + * The function that will be used to convert a UTF8-encoded string to a byte array. + * @internal + */ + utf8Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a UTF-8 encoded string. + * @internal + */ + utf8Encoder?: __Encoder; + + /** + * The runtime environment. + * @internal + */ + runtime?: string; + + /** + * Disable dyanamically changing the endpoint of the client based on the hostPrefix + * trait of an operation. + */ + disableHostPrefix?: boolean; + + /** + * Value for how many times a request will be made at most in case of retry. + */ + maxAttempts?: number | __Provider; + + /** + * Specifies which retry algorithm to use. + */ + retryMode?: string | __Provider; + + /** + * Optional logger for logging debug/info/warn/error. + */ + logger?: __Logger; + + /** + * Enables IPv6/IPv4 dualstack endpoint. + */ + useDualstackEndpoint?: boolean | __Provider; + + /** + * Enables FIPS compatible endpoints. + */ + useFipsEndpoint?: boolean | __Provider; + + /** + * Unique service identifier. 
+ * @internal + */ + serviceId?: string; + + /** + * The AWS region to which this client will send requests + */ + region?: string | __Provider; + + /** + * Default credentials provider; Not available in browser runtime. + * @internal + */ + credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; + + /** + * Fetch related hostname, signing name or signing region with given region. + * @internal + */ + regionInfoProvider?: RegionInfoProvider; + + /** + * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header + * @internal + */ + defaultUserAgentProvider?: Provider<__UserAgent>; +} + +type RUMClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & + ClientDefaults & + RegionInputConfig & + EndpointsInputConfig & + RetryInputConfig & + HostHeaderInputConfig & + AwsAuthInputConfig & + UserAgentInputConfig; +/** + * The configuration interface of RUMClient class constructor that set the region, credentials and other options. + */ +export interface RUMClientConfig extends RUMClientConfigType {} + +type RUMClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & + Required & + RegionResolvedConfig & + EndpointsResolvedConfig & + RetryResolvedConfig & + HostHeaderResolvedConfig & + AwsAuthResolvedConfig & + UserAgentResolvedConfig; +/** + * The resolved configuration interface of RUMClient class. This is resolved and normalized from the {@link RUMClientConfig | constructor configuration interface}. + */ +export interface RUMClientResolvedConfig extends RUMClientResolvedConfigType {} + +/** + *

                                  With Amazon CloudWatch RUM, you can perform real-user monitoring to collect client-side data about + * your web application performance from actual user sessions in real time. The data collected includes page load + * times, client-side errors, and user behavior. When you view this data, you can see it all aggregated together and + * also see breakdowns by the browsers and devices that your customers use.

+ *

                                  You can use the collected data to quickly identify and debug client-side performance issues. CloudWatch + * RUM helps you visualize anomalies in your application performance and find relevant debugging data such as error + * messages, stack traces, and user sessions. You can also use RUM to + * understand the range of end-user impact including the number of users, geolocations, and browsers used.
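Putting the pieces of this diff together: RUMClient is the bare-bones client that works with explicit Command objects, while the aggregated RUM class (from RUM.ts, whose updateAppMonitor overloads appear above) exposes each operation as a method with both promise and callback forms. A minimal sketch, assuming a placeholder region and app monitor name:

```typescript
import { RUM, RUMClient, ListAppMonitorsCommand } from "@aws-sdk/client-rum";

// Bare-bones client: construct a Command and send it; `send` returns a promise.
const client = new RUMClient({ region: "us-east-1" });
const list = await client.send(new ListAppMonitorsCommand({}));

// Aggregated client: every operation is also a method with promise and callback overloads.
const rum = new RUM({ region: "us-east-1" });

// Promise form (no callback supplied).
await rum.updateAppMonitor({ Name: "my-app-monitor" }); // "my-app-monitor" is a placeholder

// Callback form (the last argument is a Node-style callback).
rum.updateAppMonitor({ Name: "my-app-monitor" }, (err, data) => {
  if (err) console.error(err);
  else console.log(data);
});
```

The callback overload is selected whenever the last argument is a function; otherwise the call returns a promise, which is why the generated updateAppMonitor dispatch above checks typeof optionsOrCb before calling this.send.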

                                  + */ +export class RUMClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + RUMClientResolvedConfig +> { + /** + * The resolved configuration of RUMClient class. This is resolved and normalized from the {@link RUMClientConfig | constructor configuration interface}. + */ + readonly config: RUMClientResolvedConfig; + + constructor(configuration: RUMClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. + */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-rum/src/commands/CreateAppMonitorCommand.ts b/clients/client-rum/src/commands/CreateAppMonitorCommand.ts new file mode 100644 index 000000000000..a98347291510 --- /dev/null +++ b/clients/client-rum/src/commands/CreateAppMonitorCommand.ts @@ -0,0 +1,103 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateAppMonitorRequest, CreateAppMonitorResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateAppMonitorCommand, + serializeAws_restJson1CreateAppMonitorCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RUMClient"; + +export interface CreateAppMonitorCommandInput extends CreateAppMonitorRequest {} +export interface CreateAppMonitorCommandOutput extends CreateAppMonitorResponse, __MetadataBearer {} + +/** + *

Creates an Amazon CloudWatch RUM app monitor, which collects telemetry data from your application and sends that + * data to RUM. The data includes performance and reliability information such as page load time, client-side errors, + * and user behavior.

                                  + *

                                  You use this operation only to create a new app monitor. To update an existing app monitor, use UpdateAppMonitor instead.

                                  + *

                                  After you create an app monitor, sign in to the CloudWatch RUM console to get + * the JavaScript code snippet to add to your web application. For more information, see + * How do I find a code snippet + * that I've already generated? + *
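A minimal creation sketch, assuming the request accepts the Name, Domain, and AppMonitorConfiguration fields defined later in models_0.ts of this diff; the name, domain, and telemetry choices are placeholders:

```typescript
import { RUMClient, CreateAppMonitorCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({ region: "us-east-1" });

// Name identifies the monitor; Domain is the site it instruments (both placeholders).
const response = await client.send(
  new CreateAppMonitorCommand({
    Name: "my-app-monitor",
    Domain: "example.com",
    AppMonitorConfiguration: {
      AllowCookies: true,
      Telemetries: ["errors", "performance", "http"],
    },
  })
);

console.log(response.Id); // ID of the new app monitor (assumed response field)
```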

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, CreateAppMonitorCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, CreateAppMonitorCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new CreateAppMonitorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateAppMonitorCommandInput} for command's `input` shape. + * @see {@link CreateAppMonitorCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class CreateAppMonitorCommand extends $Command< + CreateAppMonitorCommandInput, + CreateAppMonitorCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateAppMonitorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "CreateAppMonitorCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateAppMonitorRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateAppMonitorResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateAppMonitorCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateAppMonitorCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateAppMonitorCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/DeleteAppMonitorCommand.ts b/clients/client-rum/src/commands/DeleteAppMonitorCommand.ts new file mode 100644 index 000000000000..0df8afdeceb2 --- /dev/null +++ b/clients/client-rum/src/commands/DeleteAppMonitorCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteAppMonitorRequest, DeleteAppMonitorResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteAppMonitorCommand, + serializeAws_restJson1DeleteAppMonitorCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from 
"../RUMClient"; + +export interface DeleteAppMonitorCommandInput extends DeleteAppMonitorRequest {} +export interface DeleteAppMonitorCommandOutput extends DeleteAppMonitorResponse, __MetadataBearer {} + +/** + *

                                  Deletes an existing app monitor. This immediately stops the collection of data.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, DeleteAppMonitorCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, DeleteAppMonitorCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new DeleteAppMonitorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteAppMonitorCommandInput} for command's `input` shape. + * @see {@link DeleteAppMonitorCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class DeleteAppMonitorCommand extends $Command< + DeleteAppMonitorCommandInput, + DeleteAppMonitorCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteAppMonitorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "DeleteAppMonitorCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteAppMonitorRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteAppMonitorResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteAppMonitorCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteAppMonitorCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteAppMonitorCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/GetAppMonitorCommand.ts b/clients/client-rum/src/commands/GetAppMonitorCommand.ts new file mode 100644 index 000000000000..a0217184658b --- /dev/null +++ b/clients/client-rum/src/commands/GetAppMonitorCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetAppMonitorRequest, GetAppMonitorResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetAppMonitorCommand, + serializeAws_restJson1GetAppMonitorCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RUMClient"; + +export 
interface GetAppMonitorCommandInput extends GetAppMonitorRequest {} +export interface GetAppMonitorCommandOutput extends GetAppMonitorResponse, __MetadataBearer {} + +/** + *

                                  Retrieves the complete configuration information for one app monitor.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, GetAppMonitorCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, GetAppMonitorCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new GetAppMonitorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetAppMonitorCommandInput} for command's `input` shape. + * @see {@link GetAppMonitorCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class GetAppMonitorCommand extends $Command< + GetAppMonitorCommandInput, + GetAppMonitorCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetAppMonitorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "GetAppMonitorCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetAppMonitorRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetAppMonitorResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetAppMonitorCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetAppMonitorCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetAppMonitorCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/GetAppMonitorDataCommand.ts b/clients/client-rum/src/commands/GetAppMonitorDataCommand.ts new file mode 100644 index 000000000000..92d62f736160 --- /dev/null +++ b/clients/client-rum/src/commands/GetAppMonitorDataCommand.ts @@ -0,0 +1,96 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetAppMonitorDataRequest, GetAppMonitorDataResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetAppMonitorDataCommand, + serializeAws_restJson1GetAppMonitorDataCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RUMClient"; + +export interface 
GetAppMonitorDataCommandInput extends GetAppMonitorDataRequest {} +export interface GetAppMonitorDataCommandOutput extends GetAppMonitorDataResponse, __MetadataBearer {} + +/** + *

                                  Retrieves the raw performance events that RUM has collected from your web application, + * so that you can do your own processing or analysis of this data.
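A paging sketch for pulling the raw events out for offline analysis. The TimeRange shape, the epoch-millisecond timestamps, and the Events/NextToken response fields are assumptions about the request and response models imported above:

```typescript
import { RUMClient, GetAppMonitorDataCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({ region: "us-east-1" });

// Page through the last hour of raw events for one monitor (name is a placeholder).
let nextToken: string | undefined;
do {
  const page = await client.send(
    new GetAppMonitorDataCommand({
      Name: "my-app-monitor",
      TimeRange: { After: Date.now() - 3600 * 1000, Before: Date.now() },
      NextToken: nextToken,
    })
  );
  for (const event of page.Events ?? []) {
    console.log(JSON.parse(event)); // each event is returned as a JSON string
  }
  nextToken = page.NextToken;
} while (nextToken);
```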

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, GetAppMonitorDataCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, GetAppMonitorDataCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new GetAppMonitorDataCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetAppMonitorDataCommandInput} for command's `input` shape. + * @see {@link GetAppMonitorDataCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class GetAppMonitorDataCommand extends $Command< + GetAppMonitorDataCommandInput, + GetAppMonitorDataCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetAppMonitorDataCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "GetAppMonitorDataCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetAppMonitorDataRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetAppMonitorDataResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetAppMonitorDataCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetAppMonitorDataCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetAppMonitorDataCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/ListAppMonitorsCommand.ts b/clients/client-rum/src/commands/ListAppMonitorsCommand.ts new file mode 100644 index 000000000000..71fc4c10c75e --- /dev/null +++ b/clients/client-rum/src/commands/ListAppMonitorsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListAppMonitorsRequest, ListAppMonitorsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListAppMonitorsCommand, + serializeAws_restJson1ListAppMonitorsCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from 
"../RUMClient"; + +export interface ListAppMonitorsCommandInput extends ListAppMonitorsRequest {} +export interface ListAppMonitorsCommandOutput extends ListAppMonitorsResponse, __MetadataBearer {} + +/** + *

                                  Returns a list of the Amazon CloudWatch RUM app monitors in the account.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, ListAppMonitorsCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, ListAppMonitorsCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new ListAppMonitorsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListAppMonitorsCommandInput} for command's `input` shape. + * @see {@link ListAppMonitorsCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class ListAppMonitorsCommand extends $Command< + ListAppMonitorsCommandInput, + ListAppMonitorsCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListAppMonitorsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "ListAppMonitorsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListAppMonitorsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListAppMonitorsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListAppMonitorsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListAppMonitorsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListAppMonitorsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/ListTagsForResourceCommand.ts b/clients/client-rum/src/commands/ListTagsForResourceCommand.ts new file mode 100644 index 000000000000..7f6026741947 --- /dev/null +++ b/clients/client-rum/src/commands/ListTagsForResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListTagsForResourceRequest, ListTagsForResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTagsForResourceCommand, + serializeAws_restJson1ListTagsForResourceCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from 
"../RUMClient"; + +export interface ListTagsForResourceCommandInput extends ListTagsForResourceRequest {} +export interface ListTagsForResourceCommandOutput extends ListTagsForResourceResponse, __MetadataBearer {} + +/** + *

                                  Displays the tags associated with a CloudWatch RUM resource.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, ListTagsForResourceCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, ListTagsForResourceCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "ListTagsForResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTagsForResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTagsForResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTagsForResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTagsForResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListTagsForResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/PutRumEventsCommand.ts b/clients/client-rum/src/commands/PutRumEventsCommand.ts new file mode 100644 index 000000000000..e30f9036e45d --- /dev/null +++ b/clients/client-rum/src/commands/PutRumEventsCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { PutRumEventsRequest, PutRumEventsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1PutRumEventsCommand, + serializeAws_restJson1PutRumEventsCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } 
from "../RUMClient"; + +export interface PutRumEventsCommandInput extends PutRumEventsRequest {} +export interface PutRumEventsCommandOutput extends PutRumEventsResponse, __MetadataBearer {} + +/** + *

                                  Sends telemetry events about your application performance and user behavior to CloudWatch RUM. The code + * snippet that RUM generates for you to add to your application includes PutRumEvents operations to + * send this data to RUM.

                                  + *

                                  Each PutRumEvents operation can send a batch of events from one user session.
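A sketch of one such batch, sent directly rather than through the generated web client. Every value is a placeholder, and the lowercase member names on the event and detail structures are assumptions about the models in this diff:

```typescript
import { RUMClient, PutRumEventsCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({ region: "us-east-1" });

// One PutRumEvents call carries a batch of events from a single user session.
await client.send(
  new PutRumEventsCommand({
    Id: "00000000-0000-0000-0000-000000000000", // the app monitor ID (placeholder)
    BatchId: "11111111-1111-1111-1111-111111111111", // unique per batch (placeholder)
    AppMonitorDetails: {
      name: "my-app-monitor",
      id: "00000000-0000-0000-0000-000000000000",
      version: "1.0.0",
    },
    UserDetails: { userId: "user-123", sessionId: "session-456" },
    RumEvents: [
      {
        id: "22222222-2222-2222-2222-222222222222",
        timestamp: new Date(),
        type: "com.amazon.rum.performance_navigation_event", // illustrative event type
        details: JSON.stringify({ duration: 1234 }),
      },
    ],
  })
);
```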

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, PutRumEventsCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, PutRumEventsCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new PutRumEventsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link PutRumEventsCommandInput} for command's `input` shape. + * @see {@link PutRumEventsCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class PutRumEventsCommand extends $Command< + PutRumEventsCommandInput, + PutRumEventsCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: PutRumEventsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "PutRumEventsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: PutRumEventsRequest.filterSensitiveLog, + outputFilterSensitiveLog: PutRumEventsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: PutRumEventsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1PutRumEventsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1PutRumEventsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/TagResourceCommand.ts b/clients/client-rum/src/commands/TagResourceCommand.ts new file mode 100644 index 000000000000..25647906b0a3 --- /dev/null +++ b/clients/client-rum/src/commands/TagResourceCommand.ts @@ -0,0 +1,108 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { TagResourceRequest, TagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1TagResourceCommand, + serializeAws_restJson1TagResourceCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RUMClient"; + +export interface TagResourceCommandInput extends TagResourceRequest {} +export 
interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} + +/** + *

Assigns one or more tags (key-value pairs) to the specified CloudWatch RUM resource. Currently, + * the only resources that + * can be tagged are app monitors.

                                  + *

                                  Tags can help you organize and categorize your resources. You can also use them to scope user + * permissions by granting a user + * permission to access or change only resources with certain tag values.

                                  + *

                                  Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                  + *

You can use the TagResource action with a resource that already has tags. + * If you specify a new tag key for the resource, + * this tag is appended to the list of tags associated + * with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces + * the previous value for that tag.

                                  + *

                                  You can associate as many as 50 tags with a resource.

                                  + *

                                  For more information, see Tagging Amazon Web Services resources.
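A tagging sketch, assuming the standard ResourceArn/Tags request shape and a placeholder app monitor ARN; Tags is the same string-to-string map used on the AppMonitor model later in this diff:

```typescript
import { RUMClient, TagResourceCommand, ListTagsForResourceCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({ region: "us-east-1" });

// Placeholder ARN for the app monitor being tagged.
const resourceArn = "arn:aws:rum:us-east-1:123456789012:appmonitor/my-app-monitor";

// New keys are appended; an existing key keeps its place but gets the new value.
await client.send(
  new TagResourceCommand({
    ResourceArn: resourceArn,
    Tags: { team: "web-perf", stage: "prod" },
  })
);

// Read the tags back (assumed Tags field on the response).
const { Tags } = await client.send(new ListTagsForResourceCommand({ ResourceArn: resourceArn }));
console.log(Tags);
```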

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, TagResourceCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, TagResourceCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "TagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: TagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: TagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1TagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1TagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/UntagResourceCommand.ts b/clients/client-rum/src/commands/UntagResourceCommand.ts new file mode 100644 index 000000000000..f735169ebec1 --- /dev/null +++ b/clients/client-rum/src/commands/UntagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UntagResourceRequest, UntagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UntagResourceCommand, + serializeAws_restJson1UntagResourceCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RUMClient"; + +export interface UntagResourceCommandInput extends UntagResourceRequest {} +export 
interface UntagResourceCommandOutput extends UntagResourceResponse, __MetadataBearer {} + +/** + *

                                  Removes one or more tags from the specified resource.

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, UntagResourceCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, UntagResourceCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "UntagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UntagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: UntagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UntagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UntagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UntagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/UpdateAppMonitorCommand.ts b/clients/client-rum/src/commands/UpdateAppMonitorCommand.ts new file mode 100644 index 000000000000..5a0a74329263 --- /dev/null +++ b/clients/client-rum/src/commands/UpdateAppMonitorCommand.ts @@ -0,0 +1,105 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UpdateAppMonitorRequest, UpdateAppMonitorResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateAppMonitorCommand, + serializeAws_restJson1UpdateAppMonitorCommand, +} from "../protocols/Aws_restJson1"; +import { RUMClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../RUMClient"; + +export interface 
UpdateAppMonitorCommandInput extends UpdateAppMonitorRequest {} +export interface UpdateAppMonitorCommandOutput extends UpdateAppMonitorResponse, __MetadataBearer {} + +/** + *

                                  Updates the configuration of an existing app monitor. When you use this operation, only the parts of the app monitor + * configuration that you specify in this operation are changed. For any parameters that you omit, the existing + * values are kept.

                                  + *

                                  You can't use this operation to change the tags of an existing app monitor. To change the tags of an existing app monitor, use + * TagResource.

                                  + *

                                  To create a new app monitor, use CreateAppMonitor.

                                  + *

                                  After you update an app monitor, sign in to the CloudWatch RUM console to get + * the updated JavaScript code snippet to add to your web application. For more information, see + * How do I find a code snippet + * that I've already generated? + *
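A partial-update sketch with a placeholder monitor name; only the configuration fields supplied here change, and everything omitted keeps its current value:

```typescript
import { RUMClient, UpdateAppMonitorCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({ region: "us-east-1" });

// Turn on X-Ray tracing and narrow the telemetry types; all other settings are untouched.
await client.send(
  new UpdateAppMonitorCommand({
    Name: "my-app-monitor",
    AppMonitorConfiguration: {
      Telemetries: ["errors", "performance"],
      EnableXRay: true,
    },
  })
);
```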

                                  + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { RUMClient, UpdateAppMonitorCommand } from "@aws-sdk/client-rum"; // ES Modules import + * // const { RUMClient, UpdateAppMonitorCommand } = require("@aws-sdk/client-rum"); // CommonJS import + * const client = new RUMClient(config); + * const command = new UpdateAppMonitorCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateAppMonitorCommandInput} for command's `input` shape. + * @see {@link UpdateAppMonitorCommandOutput} for command's `response` shape. + * @see {@link RUMClientResolvedConfig | config} for RUMClient's `config` shape. + * + */ +export class UpdateAppMonitorCommand extends $Command< + UpdateAppMonitorCommandInput, + UpdateAppMonitorCommandOutput, + RUMClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateAppMonitorCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: RUMClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "RUMClient"; + const commandName = "UpdateAppMonitorCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateAppMonitorRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateAppMonitorResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateAppMonitorCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateAppMonitorCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateAppMonitorCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-rum/src/commands/index.ts b/clients/client-rum/src/commands/index.ts new file mode 100644 index 000000000000..7836b7e33c9f --- /dev/null +++ b/clients/client-rum/src/commands/index.ts @@ -0,0 +1,10 @@ +export * from "./CreateAppMonitorCommand"; +export * from "./DeleteAppMonitorCommand"; +export * from "./GetAppMonitorCommand"; +export * from "./GetAppMonitorDataCommand"; +export * from "./ListAppMonitorsCommand"; +export * from "./ListTagsForResourceCommand"; +export * from "./PutRumEventsCommand"; +export * from "./TagResourceCommand"; +export * from "./UntagResourceCommand"; +export * from "./UpdateAppMonitorCommand"; diff --git a/clients/client-rum/src/endpoints.ts b/clients/client-rum/src/endpoints.ts new file mode 100644 index 000000000000..eecc012244f1 --- /dev/null +++ b/clients/client-rum/src/endpoints.ts @@ -0,0 +1,134 @@ +import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; +import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; + +const regionHash: RegionHash = {}; + 
+const partitionHash: PartitionHash = { + aws: { + regions: [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ], + regionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rum.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "rum-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "rum-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "rum.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, + "aws-cn": { + regions: ["cn-north-1", "cn-northwest-1"], + regionRegex: "^cn\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rum.{region}.amazonaws.com.cn", + tags: [], + }, + { + hostname: "rum-fips.{region}.amazonaws.com.cn", + tags: ["fips"], + }, + { + hostname: "rum-fips.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack", "fips"], + }, + { + hostname: "rum.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "aws-iso": { + regions: ["us-iso-east-1", "us-iso-west-1"], + regionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rum.{region}.c2s.ic.gov", + tags: [], + }, + { + hostname: "rum-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, + ], + }, + "aws-iso-b": { + regions: ["us-isob-east-1"], + regionRegex: "^us\\-isob\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rum.{region}.sc2s.sgov.gov", + tags: [], + }, + { + hostname: "rum-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, + ], + }, + "aws-us-gov": { + regions: ["us-gov-east-1", "us-gov-west-1"], + regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "rum.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "rum-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "rum-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "rum.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, +}; + +export const defaultRegionInfoProvider: RegionInfoProvider = async ( + region: string, + options?: RegionInfoProviderOptions +) => + getRegionInfo(region, { + ...options, + signingService: "rum", + regionHash, + partitionHash, + }); diff --git a/clients/client-rum/src/index.ts b/clients/client-rum/src/index.ts new file mode 100644 index 000000000000..9a60d62720f8 --- /dev/null +++ b/clients/client-rum/src/index.ts @@ -0,0 +1,5 @@ +export * from "./RUM"; +export * from "./RUMClient"; +export * from "./commands"; +export * from "./models"; +export * from "./pagination"; diff --git a/clients/client-rum/src/models/index.ts b/clients/client-rum/src/models/index.ts new file mode 100644 index 000000000000..09c5d6e09b8c --- /dev/null +++ b/clients/client-rum/src/models/index.ts @@ -0,0 +1 @@ +export * from "./models_0"; diff --git a/clients/client-rum/src/models/models_0.ts b/clients/client-rum/src/models/models_0.ts new file mode 100644 index 000000000000..0780f667e154 --- /dev/null +++ b/clients/client-rum/src/models/models_0.ts @@ -0,0 +1,1016 @@ +import { LazyJsonString as __LazyJsonString } from "@aws-sdk/smithy-client"; +import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; + +/** + *

                                  You don't have sufficient permissions to perform this action.

                                  + */ +export interface AccessDeniedException extends __SmithyException, $MetadataBearer { + name: "AccessDeniedException"; + $fault: "client"; + message: string | undefined; +} + +export namespace AccessDeniedException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccessDeniedException): any => ({ + ...obj, + }); +} + +export enum Telemetry { + /** + * Includes JS error event plugin + */ + ERRORS = "errors", + /** + * Includes X-Ray Xhr and X-Ray Fetch plugin + */ + HTTP = "http", + /** + * Includes navigation, paint, resource and web vital event plugins + */ + PERFORMANCE = "performance", +} + +/** + *

                                  This structure contains much of the configuration data for the app monitor.

                                  + */ +export interface AppMonitorConfiguration { + /** + *

                                  The ID of the Amazon Cognito identity pool + * that is used to authorize the sending of data to RUM.

                                  + */ + IdentityPoolId?: string; + + /** + *

                                  A list of URLs in your website or application to exclude from RUM data collection.

                                  + *

                                  You can't include both ExcludedPages and IncludedPages in the same operation.

                                  + */ + ExcludedPages?: string[]; + + /** + *

                                  If this app monitor is to collect data from only certain pages in your application, this structure lists those pages.

+ *

                                  You can't include both ExcludedPages and IncludedPages in the same operation.

                                  + */ + IncludedPages?: string[]; + + /** + *

                                  A list of pages in the CloudWatch RUM console that are to be displayed with a "favorite" icon.

                                  + */ + FavoritePages?: string[]; + + /** + *

                                  Specifies the percentage of user sessions to use for RUM data collection. Choosing a higher percentage gives you + * more data but also incurs more costs.

                                  + *

                                  The number you specify is the percentage of user sessions that will be used.

                                  + *

                                  If you omit this parameter, the default of 10 is used.

                                  + */ + SessionSampleRate?: number; + + /** + *

                                  The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool + * that is used to authorize the sending of data to RUM.

                                  + */ + GuestRoleArn?: string; + + /** + *

                                  If you set this to true, the RUM web client sets two cookies, a session + * cookie and a user cookie. The cookies allow the RUM web client to collect data relating to + * the number of users an application has and the behavior of the application across a + * sequence of events. Cookies are stored in the top-level domain of the current page.

                                  + */ + AllowCookies?: boolean; + + /** + *

                                  An array that lists the types of telemetry data that this app monitor is to collect.

+ *
+ * • errors indicates that RUM collects data about unhandled JavaScript errors raised by your application.
+ * • performance indicates that RUM collects performance data about how your application and its resources are loaded and rendered. This includes Core Web Vitals.
+ * • http indicates that RUM collects data about HTTP errors thrown by your application.
                                  + */ + Telemetries?: (Telemetry | string)[]; + + /** + *

                                  If you set this to true, RUM enables X-Ray tracing for + * the user sessions that RUM samples. RUM adds an X-Ray trace header to allowed + * HTTP requests. It also records an X-Ray segment for allowed HTTP requests. + * You can see traces and segments from these user sessions in the X-Ray console + * and the CloudWatch ServiceLens console. For more information, see What is X-Ray? + *
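Taken together, the fields of this structure can be populated as a plain object. All identifiers below are placeholders, and ExcludedPages is used instead of IncludedPages because the two cannot be combined in one operation:

```typescript
import { AppMonitorConfiguration } from "@aws-sdk/client-rum";

// A sketch of a fully populated configuration for an app monitor.
const config: AppMonitorConfiguration = {
  IdentityPoolId: "us-east-1:00000000-0000-0000-0000-000000000000", // placeholder Cognito pool
  GuestRoleArn: "arn:aws:iam::123456789012:role/RUM-Monitor-Guest-Role", // placeholder role
  ExcludedPages: ["https://example.com/admin"], // pages to exclude from collection
  FavoritePages: ["https://example.com/checkout"], // shown with a "favorite" icon in the console
  AllowCookies: true, // let the web client set session and user cookies
  Telemetries: ["errors", "performance", "http"],
  EnableXRay: false,
};
```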

                                  + */ + EnableXRay?: boolean; +} + +export namespace AppMonitorConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AppMonitorConfiguration): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains the information about whether the app monitor stores copies of the data + * that RUM collects in CloudWatch Logs. If it does, this structure also contains the name of the log group.

                                  + */ +export interface CwLog { + /** + *

Indicates whether the app monitor stores copies of the data + * that RUM collects in CloudWatch Logs.

                                  + */ + CwLogEnabled?: boolean; + + /** + *

                                  The name of the log group where the copies are stored.

                                  + */ + CwLogGroup?: string; +} + +export namespace CwLog { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CwLog): any => ({ + ...obj, + }); +} + +/** + *

                                  A structure that contains information about whether this app monitor stores a copy of + * the telemetry data that RUM collects using CloudWatch Logs.

                                  + */ +export interface DataStorage { + /** + *

                                  A structure that contains the information about whether the app monitor stores copies of the data + * that RUM collects in CloudWatch Logs. If it does, this structure also contains the name of the log group.

                                  + */ + CwLog?: CwLog; +} + +export namespace DataStorage { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DataStorage): any => ({ + ...obj, + }); +} + +export enum StateEnum { + ACTIVE = "ACTIVE", + CREATED = "CREATED", + DELETING = "DELETING", +} + +/** + *

+/**
+ * A RUM app monitor collects telemetry data from your application and sends that
+ * data to RUM. The data includes performance and reliability information such as page load time, client-side errors,
+ * and user behavior.
+ */
+export interface AppMonitor {
+  /** The name of the app monitor. */
+  Name?: string;
+
+  /** The top-level internet domain name for which your application has administrative authority. */
+  Domain?: string;
+
+  /** The unique ID of this app monitor. */
+  Id?: string;
+
+  /** The date and time that this app monitor was created. */
+  Created?: string;
+
+  /** The date and time of the most recent changes to this app monitor's configuration. */
+  LastModified?: string;
+
+  /** The list of tag keys and values associated with this app monitor. */
+  Tags?: { [key: string]: string };
+
+  /** The current state of the app monitor. */
+  State?: StateEnum | string;
+
+  /** A structure that contains much of the configuration data for the app monitor. */
+  AppMonitorConfiguration?: AppMonitorConfiguration;
+
+  /**
+   * A structure that contains information about whether this app monitor stores a copy of
+   * the telemetry data that RUM collects using CloudWatch Logs.
+   */
+  DataStorage?: DataStorage;
+}
+
+export namespace AppMonitor {
+  /** @internal */
+  export const filterSensitiveLog = (obj: AppMonitor): any => ({ ...obj });
+}

+/**
+ * A structure that contains information about the RUM app monitor.
+ */
+export interface AppMonitorDetails {
+  /** The name of the app monitor. */
+  name?: string;
+
+  /** The unique ID of the app monitor. */
+  id?: string;
+
+  /** The version of the app monitor. */
+  version?: string;
+}
+
+export namespace AppMonitorDetails {
+  /** @internal */
+  export const filterSensitiveLog = (obj: AppMonitorDetails): any => ({ ...obj });
+}
+
+/**
+ * This operation attempted to create a resource that already exists.
+ */
+export interface ConflictException extends __SmithyException, $MetadataBearer {
+  name: "ConflictException";
+  $fault: "client";
+  message: string | undefined;
+
+  /** The name of the resource that is associated with the error. */
+  resourceName: string | undefined;
+
+  /** The type of the resource that is associated with the error. */
+  resourceType?: string;
+}
+
+export namespace ConflictException {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ConflictException): any => ({ ...obj });
+}

+export interface CreateAppMonitorRequest {
+  /** A name for the app monitor. */
+  Name: string | undefined;
+
+  /** The top-level internet domain name for which your application has administrative authority. */
+  Domain: string | undefined;
+
+  /**
+   * Assigns one or more tags (key-value pairs) to the app monitor.
+   *
+   * Tags can help you organize and categorize your resources. You can also use them to scope user
+   * permissions by granting a user permission to access or change only resources with certain tag values.
+   *
+   * Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.
+   *
+   * You can associate as many as 50 tags with an app monitor.
+   *
+   * For more information, see Tagging Amazon Web Services resources.
+   */
+  Tags?: { [key: string]: string };
+
+  /**
+   * A structure that contains much of the configuration data for the app monitor. If you are using
+   * Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the
+   * Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration, you must set up your own
+   * authorization method. For more information, see
+   * Authorize your application to send data to Amazon Web Services.
+   *
+   * If you omit this argument, the sample rate used for RUM is set to 10% of the user sessions.
+   */
+  AppMonitorConfiguration?: AppMonitorConfiguration;
+
+  /**
+   * Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM
+   * sends a copy of this telemetry data to Amazon CloudWatch Logs
+   * in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur
+   * Amazon CloudWatch Logs charges.
+   *
+   * If you omit this parameter, the default is false.
+   */
+  CwLogEnabled?: boolean;
+}
+
+export namespace CreateAppMonitorRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: CreateAppMonitorRequest): any => ({ ...obj });
+}
+
+export interface CreateAppMonitorResponse {
+  /** The unique ID of the new app monitor. */
+  Id?: string;
+}
+
+export namespace CreateAppMonitorResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: CreateAppMonitorResponse): any => ({ ...obj });
+}
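A hedged usage sketch of the request/response pair above with the RUMClient and CreateAppMonitorCommand that appear elsewhere in this patch. The region, names, and domain are assumptions, not values from the diff.

import { RUMClient, CreateAppMonitorCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({ region: "us-east-1" }); // region is an assumption

// Create an app monitor for an example domain; CwLogEnabled defaults to false
// when omitted, per the CreateAppMonitorRequest docs above.
async function createExampleMonitor(): Promise<string | undefined> {
  const { Id } = await client.send(
    new CreateAppMonitorCommand({
      Name: "my-web-app",    // hypothetical monitor name
      Domain: "example.com", // hypothetical top-level domain
      AppMonitorConfiguration: {
        AllowCookies: true,
        Telemetries: ["errors", "performance", "http"],
      },
      CwLogEnabled: true,
    })
  );
  return Id;
}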

+/**
+ * Internal service exception.
+ */
+export interface InternalServerException extends __SmithyException, $MetadataBearer {
+  name: "InternalServerException";
+  $fault: "server";
+  $retryable: {};
+  message: string | undefined;
+
+  /** The number of seconds to wait before retrying the request. */
+  retryAfterSeconds?: number;
+}
+
+export namespace InternalServerException {
+  /** @internal */
+  export const filterSensitiveLog = (obj: InternalServerException): any => ({ ...obj });
+}
+
+/**
+ * This request exceeds a service quota.
+ */
+export interface ServiceQuotaExceededException extends __SmithyException, $MetadataBearer {
+  name: "ServiceQuotaExceededException";
+  $fault: "client";
+  message: string | undefined;
+}
+
+export namespace ServiceQuotaExceededException {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ServiceQuotaExceededException): any => ({ ...obj });
+}
+
+/**
+ * The request was throttled because of quota limits.
+ */
+export interface ThrottlingException extends __SmithyException, $MetadataBearer {
+  name: "ThrottlingException";
+  $fault: "client";
+  $retryable: {
+    throttling: true;
+  };
+  message: string | undefined;
+
+  /** The ID of the service that is associated with the error. */
+  serviceCode?: string;
+
+  /** The ID of the service quota that was exceeded. */
+  quotaCode?: string;
+
+  /** The number of seconds to wait before retrying the request. */
+  retryAfterSeconds?: number;
+}
+
+export namespace ThrottlingException {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ThrottlingException): any => ({ ...obj });
+}
+
+/**
+ * One of the arguments for the request is not valid.
+ */
+export interface ValidationException extends __SmithyException, $MetadataBearer {
+  name: "ValidationException";
+  $fault: "client";
+  message: string | undefined;
+}
+
+export namespace ValidationException {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ValidationException): any => ({ ...obj });
+}
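Because these error shapes all extend __SmithyException with a literal name, and the deserializers later in this patch attach that name to the thrown Error, a caller can branch on error.name. A minimal sketch, with the command and package name assumed as before:

import { RUMClient, CreateAppMonitorCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({});

async function createWithErrorHandling(name: string, domain: string): Promise<void> {
  try {
    await client.send(new CreateAppMonitorCommand({ Name: name, Domain: domain }));
  } catch (error: any) {
    switch (error?.name) {
      case "ConflictException":
        console.warn(`App monitor ${error.resourceName} already exists`);
        break;
      case "ThrottlingException":
      case "InternalServerException":
        // Both shapes carry retryAfterSeconds; honor it if present.
        console.warn(`Retry after ${error.retryAfterSeconds ?? 1} seconds`);
        break;
      case "ValidationException":
        console.error(`Invalid argument: ${error.message}`);
        break;
      default:
        throw error;
    }
  }
}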

+export interface DeleteAppMonitorRequest {
+  /** The name of the app monitor to delete. */
+  Name: string | undefined;
+}
+
+export namespace DeleteAppMonitorRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteAppMonitorRequest): any => ({ ...obj });
+}
+
+export interface DeleteAppMonitorResponse {}
+
+export namespace DeleteAppMonitorResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: DeleteAppMonitorResponse): any => ({ ...obj });
+}
+
+/**
+ * Resource not found.
+ */
+export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer {
+  name: "ResourceNotFoundException";
+  $fault: "client";
+  message: string | undefined;
+
+  /** The name of the resource that is associated with the error. */
+  resourceName: string | undefined;
+
+  /** The type of the resource that is associated with the error. */
+  resourceType?: string;
+}
+
+export namespace ResourceNotFoundException {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ ...obj });
+}
+
+export interface GetAppMonitorRequest {
+  /** The app monitor to retrieve information for. */
+  Name: string | undefined;
+}
+
+export namespace GetAppMonitorRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: GetAppMonitorRequest): any => ({ ...obj });
+}
+
+export interface GetAppMonitorResponse {
+  /** A structure containing all the configuration information for the app monitor. */
+  AppMonitor?: AppMonitor;
+}
+
+export namespace GetAppMonitorResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: GetAppMonitorResponse): any => ({ ...obj });
+}

+/**
+ * A structure that defines a key and values that you can use to filter the results. The
+ * only performance events that are returned are those that have values matching the ones that
+ * you specify in one of your QueryFilter structures.
+ *
+ * For example, you could specify Browser as the Name
+ * and specify Chrome,Firefox as the Values to return
+ * events generated only from those browsers.
+ *
+ * Specifying Invert as the Name
+ * works as a "not equal to" filter. For example, specify Invert as the Name
+ * and specify Chrome as the value to return all events except events from
+ * user sessions with the Chrome browser.
+ */
+export interface QueryFilter {
+  /**
+   * The name of a key to search for.
+   * The filter returns only the events that match the Name
+   * and Values that you specify.
+   *
+   * Valid values for Name are Browser | Device | Country |
+   * Page | OS | EventType | Invert
+   */
+  Name?: string;
+
+  /** The values of the Name that are to be included in the returned results. */
+  Values?: string[];
+}
+
+export namespace QueryFilter {
+  /** @internal */
+  export const filterSensitiveLog = (obj: QueryFilter): any => ({ ...obj });
+}
+
+/**
+ * A structure that defines the time range that you want to retrieve results from.
+ */
+export interface TimeRange {
+  /** The beginning of the time range to retrieve performance events from. */
+  After: number | undefined;
+
+  /**
+   * The end of the time range to retrieve performance events from. If you omit this, the time
+   * range extends to the time that this operation is performed.
+   */
+  Before?: number;
+}
+
+export namespace TimeRange {
+  /** @internal */
+  export const filterSensitiveLog = (obj: TimeRange): any => ({ ...obj });
+}
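A sketch of the filter and time-range structures, following the Browser/Chrome,Firefox example in the QueryFilter docs above. The epoch-based timestamps are an assumption; the unit of After/Before is not stated in this excerpt.

import { QueryFilter, TimeRange } from "@aws-sdk/client-rum";

// Roughly the last hour of events (epoch values; exact unit assumed here).
const lastHour: TimeRange = {
  After: Date.now() - 60 * 60 * 1000,
  Before: Date.now(),
};

// Only events from Chrome or Firefox sessions, per the QueryFilter docs above.
const browserFilter: QueryFilter[] = [{ Name: "Browser", Values: ["Chrome", "Firefox"] }];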

+export interface GetAppMonitorDataRequest {
+  /** The name of the app monitor that collected the data that you want to retrieve. */
+  Name: string | undefined;
+
+  /** A structure that defines the time range that you want to retrieve results from. */
+  TimeRange: TimeRange | undefined;
+
+  /**
+   * An array of structures that you can use to filter the results to those that match one or
+   * more sets of key-value pairs that you specify.
+   */
+  Filters?: QueryFilter[];
+
+  /** The maximum number of results to return in one operation. */
+  MaxResults?: number;
+
+  /** Use the token returned by the previous operation to request the next page of results. */
+  NextToken?: string;
+}
+
+export namespace GetAppMonitorDataRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: GetAppMonitorDataRequest): any => ({ ...obj });
+}
+
+export interface GetAppMonitorDataResponse {
+  /** The events that RUM collected that match your request. */
+  Events?: string[];
+
+  /** A token that you can use in a subsequent operation to retrieve the next set of results. */
+  NextToken?: string;
+}
+
+export namespace GetAppMonitorDataResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: GetAppMonitorDataResponse): any => ({ ...obj });
+}
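The NextToken/MaxResults pair above supports paging. A hedged sketch of a manual paging loop with GetAppMonitorDataCommand (a dedicated paginator also appears later in this patch); the package name and root exports are assumed.

import { RUMClient, GetAppMonitorDataCommand, GetAppMonitorDataRequest } from "@aws-sdk/client-rum";

const client = new RUMClient({});

async function collectAllEvents(request: GetAppMonitorDataRequest): Promise<string[]> {
  const events: string[] = [];
  let nextToken: string | undefined;
  do {
    // Re-send the same request, advancing the token each time.
    const page = await client.send(
      new GetAppMonitorDataCommand({ ...request, NextToken: nextToken, MaxResults: 100 })
    );
    events.push(...(page.Events ?? []));
    nextToken = page.NextToken;
  } while (nextToken);
  return events;
}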

+export interface ListAppMonitorsRequest {
+  /** The maximum number of results to return in one operation. */
+  MaxResults?: number;
+
+  /** Use the token returned by the previous operation to request the next page of results. */
+  NextToken?: string;
+}
+
+export namespace ListAppMonitorsRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ListAppMonitorsRequest): any => ({ ...obj });
+}
+
+/**
+ * A structure that includes some data about app monitors and their settings.
+ */
+export interface AppMonitorSummary {
+  /** The name of this app monitor. */
+  Name?: string;
+
+  /** The unique ID of this app monitor. */
+  Id?: string;
+
+  /** The date and time that the app monitor was created. */
+  Created?: string;
+
+  /** The date and time of the most recent changes to this app monitor's configuration. */
+  LastModified?: string;
+
+  /** The current state of this app monitor. */
+  State?: StateEnum | string;
+}
+
+export namespace AppMonitorSummary {
+  /** @internal */
+  export const filterSensitiveLog = (obj: AppMonitorSummary): any => ({ ...obj });
+}
+
+export interface ListAppMonitorsResponse {
+  /** A token that you can use in a subsequent operation to retrieve the next set of results. */
+  NextToken?: string;
+
+  /** An array of structures that contain information about the returned app monitors. */
+  AppMonitorSummaries?: AppMonitorSummary[];
+}
+
+export namespace ListAppMonitorsResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ListAppMonitorsResponse): any => ({ ...obj });
+}
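These request/response shapes back the paginateListAppMonitors helper added later in this patch. A short sketch of the async-iterator style, assuming the paginator is re-exported from the package root:

import { RUMClient, paginateListAppMonitors } from "@aws-sdk/client-rum";

const client = new RUMClient({});

async function listMonitorNames(): Promise<string[]> {
  const names: string[] = [];
  // Each yielded page is a ListAppMonitorsCommandOutput; pageSize maps to MaxResults.
  for await (const page of paginateListAppMonitors({ client, pageSize: 50 }, {})) {
    for (const summary of page.AppMonitorSummaries ?? []) {
      if (summary.Name) names.push(summary.Name);
    }
  }
  return names;
}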

+export interface UpdateAppMonitorRequest {
+  /** The name of the app monitor to update. */
+  Name: string | undefined;
+
+  /** The top-level internet domain name for which your application has administrative authority. */
+  Domain?: string;
+
+  /**
+   * A structure that contains much of the configuration data for the app monitor. If you are using
+   * Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the
+   * Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration, you must set up your own
+   * authorization method. For more information, see
+   * Authorize your application to send data to Amazon Web Services.
+   */
+  AppMonitorConfiguration?: AppMonitorConfiguration;
+
+  /**
+   * Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM
+   * sends a copy of this telemetry data to Amazon CloudWatch Logs
+   * in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur
+   * Amazon CloudWatch Logs charges.
+   */
+  CwLogEnabled?: boolean;
+}
+
+export namespace UpdateAppMonitorRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: UpdateAppMonitorRequest): any => ({ ...obj });
+}
+
+export interface UpdateAppMonitorResponse {}
+
+export namespace UpdateAppMonitorResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: UpdateAppMonitorResponse): any => ({ ...obj });
+}
+
+export interface ListTagsForResourceRequest {
+  /** The ARN of the resource that you want to see the tags of. */
+  ResourceArn: string | undefined;
+}
+
+export namespace ListTagsForResourceRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ListTagsForResourceRequest): any => ({ ...obj });
+}
+
+export interface ListTagsForResourceResponse {
+  /** The ARN of the resource that you are viewing. */
+  ResourceArn: string | undefined;
+
+  /** The list of tag keys and values associated with the resource you specified. */
+  Tags: { [key: string]: string } | undefined;
+}
+
+export namespace ListTagsForResourceResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: ListTagsForResourceResponse): any => ({ ...obj });
+}

+/**
+ * A structure that contains the information for one performance event that RUM collects from a user session with your
+ * application.
+ */
+export interface RumEvent {
+  /** A unique ID for this event. */
+  id: string | undefined;
+
+  /** The exact time that this event occurred. */
+  timestamp: Date | undefined;
+
+  /** The JSON schema that denotes the type of event this is, such as a page load or a new session. */
+  type: string | undefined;
+
+  /**
+   * Metadata about this event, which contains a JSON serialization of the identity of the user for
+   * this session. The user information comes from information such as the HTTP user-agent request header
+   * and document interface.
+   */
+  metadata?: __LazyJsonString | string;
+
+  /** A string containing details about the event. */
+  details: __LazyJsonString | string | undefined;
+}
+
+export namespace RumEvent {
+  /** @internal */
+  export const filterSensitiveLog = (obj: RumEvent): any => ({ ...obj });
+}
+
+/**
+ * A structure that contains information about the user session that this batch of events was collected from.
+ */
+export interface UserDetails {
+  /**
+   * The ID of the user for this user session. This ID is generated by RUM and does not include any
+   * personally identifiable information about the user.
+   */
+  userId?: string;
+
+  /** The session ID that the performance events are from. */
+  sessionId?: string;
+}
+
+export namespace UserDetails {
+  /** @internal */
+  export const filterSensitiveLog = (obj: UserDetails): any => ({ ...obj });
+}
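A sketch of one RumEvent value matching the shape above; the event ID, type string, and details payload are placeholders, not values taken from this patch.

import { RumEvent } from "@aws-sdk/client-rum";

const exampleEvent: RumEvent = {
  id: "11111111-2222-3333-4444-555555555555", // hypothetical event ID
  timestamp: new Date(),
  type: "com.example.page_load_event",        // placeholder schema identifier
  details: JSON.stringify({ duration: 123 }), // details is carried as a JSON string
};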

+export interface PutRumEventsRequest {
+  /** The ID of the app monitor that is sending this data. */
+  Id: string | undefined;
+
+  /** A unique identifier for this batch of RUM event data. */
+  BatchId: string | undefined;
+
+  /** A structure that contains information about the app monitor that collected this telemetry information. */
+  AppMonitorDetails: AppMonitorDetails | undefined;
+
+  /** A structure that contains information about the user session that this batch of events was collected from. */
+  UserDetails: UserDetails | undefined;
+
+  /** An array of structures that contain the telemetry event data. */
+  RumEvents: RumEvent[] | undefined;
+}
+
+export namespace PutRumEventsRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: PutRumEventsRequest): any => ({ ...obj });
+}
+
+export interface PutRumEventsResponse {}
+
+export namespace PutRumEventsResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: PutRumEventsResponse): any => ({ ...obj });
+}
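A hedged sketch of sending a batch with PutRumEventsCommand; the serializer later in this patch prefixes the endpoint hostname with "dataplane." automatically. All IDs and names below are placeholders.

import { RUMClient, PutRumEventsCommand, RumEvent } from "@aws-sdk/client-rum";

const client = new RUMClient({});

async function sendBatch(events: RumEvent[]): Promise<void> {
  await client.send(
    new PutRumEventsCommand({
      Id: "a1b2c3d4-example-app-monitor-id",  // placeholder app monitor ID
      BatchId: "batch-0001",                   // placeholder; unique per batch
      AppMonitorDetails: { name: "my-web-app", id: "a1b2c3d4-example-app-monitor-id", version: "1.0.0" },
      UserDetails: { userId: "anon-user-1", sessionId: "session-1" },
      RumEvents: events,
    })
  );
}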

+export interface TagResourceRequest {
+  /** The ARN of the CloudWatch RUM resource that you're adding tags to. */
+  ResourceArn: string | undefined;
+
+  /** The list of key-value pairs to associate with the resource. */
+  Tags: { [key: string]: string } | undefined;
+}
+
+export namespace TagResourceRequest {
+  /** @internal */
+  export const filterSensitiveLog = (obj: TagResourceRequest): any => ({ ...obj });
+}
+
+export interface TagResourceResponse {}
+
+export namespace TagResourceResponse {
+  /** @internal */
+  export const filterSensitiveLog = (obj: TagResourceResponse): any => ({ ...obj });
+}
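A short sketch of tagging an app monitor by ARN with TagResourceCommand; the ARN format and tag values are assumptions, not taken from this diff.

import { RUMClient, TagResourceCommand } from "@aws-sdk/client-rum";

const client = new RUMClient({});

async function tagExampleMonitor(): Promise<void> {
  // The ARN below is illustrative only.
  await client.send(
    new TagResourceCommand({
      ResourceArn: "arn:aws:rum:us-east-1:123456789012:appmonitor/my-web-app",
      Tags: { team: "frontend", stage: "prod" },
    })
  );
}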

+export interface UntagResourceRequest {
+  /** The ARN of the CloudWatch RUM resource that you're removing tags from. */
+  ResourceArn: string | undefined;
+
+  /**
+   * The list of tag keys to remove from the resource.

                                  + */ + TagKeys: string[] | undefined; +} + +export namespace UntagResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({ + ...obj, + }); +} + +export interface UntagResourceResponse {} + +export namespace UntagResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-rum/src/pagination/GetAppMonitorDataPaginator.ts b/clients/client-rum/src/pagination/GetAppMonitorDataPaginator.ts new file mode 100644 index 000000000000..ce0e0c7aeb3a --- /dev/null +++ b/clients/client-rum/src/pagination/GetAppMonitorDataPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + GetAppMonitorDataCommand, + GetAppMonitorDataCommandInput, + GetAppMonitorDataCommandOutput, +} from "../commands/GetAppMonitorDataCommand"; +import { RUM } from "../RUM"; +import { RUMClient } from "../RUMClient"; +import { RUMPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: RUMClient, + input: GetAppMonitorDataCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new GetAppMonitorDataCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: RUM, + input: GetAppMonitorDataCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.getAppMonitorData(input, ...args); +}; +export async function* paginateGetAppMonitorData( + config: RUMPaginationConfiguration, + input: GetAppMonitorDataCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: GetAppMonitorDataCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof RUM) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof RUMClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected RUM | RUMClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-rum/src/pagination/Interfaces.ts b/clients/client-rum/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..eae9ad11a47c --- /dev/null +++ b/clients/client-rum/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { RUM } from "../RUM"; +import { RUMClient } from "../RUMClient"; + +export interface RUMPaginationConfiguration extends PaginationConfiguration { + client: RUM | RUMClient; +} diff --git a/clients/client-rum/src/pagination/ListAppMonitorsPaginator.ts b/clients/client-rum/src/pagination/ListAppMonitorsPaginator.ts new file mode 100644 index 000000000000..f536a833cc79 --- /dev/null +++ b/clients/client-rum/src/pagination/ListAppMonitorsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListAppMonitorsCommand, + ListAppMonitorsCommandInput, + ListAppMonitorsCommandOutput, +} from "../commands/ListAppMonitorsCommand"; +import { RUM } from "../RUM"; +import { RUMClient } from "../RUMClient"; +import { 
RUMPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: RUMClient, + input: ListAppMonitorsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListAppMonitorsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: RUM, + input: ListAppMonitorsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listAppMonitors(input, ...args); +}; +export async function* paginateListAppMonitors( + config: RUMPaginationConfiguration, + input: ListAppMonitorsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.NextToken + let token: typeof input.NextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListAppMonitorsCommandOutput; + while (hasNext) { + input.NextToken = token; + input["MaxResults"] = config.pageSize; + if (config.client instanceof RUM) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof RUMClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected RUM | RUMClient"); + } + yield page; + token = page.NextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-rum/src/pagination/index.ts b/clients/client-rum/src/pagination/index.ts new file mode 100644 index 000000000000..f1ae162978e5 --- /dev/null +++ b/clients/client-rum/src/pagination/index.ts @@ -0,0 +1,3 @@ +export * from "./GetAppMonitorDataPaginator"; +export * from "./Interfaces"; +export * from "./ListAppMonitorsPaginator"; diff --git a/clients/client-rum/src/protocols/Aws_restJson1.ts b/clients/client-rum/src/protocols/Aws_restJson1.ts new file mode 100644 index 000000000000..ab61b7026b6c --- /dev/null +++ b/clients/client-rum/src/protocols/Aws_restJson1.ts @@ -0,0 +1,1728 @@ +import { + HttpRequest as __HttpRequest, + HttpResponse as __HttpResponse, + isValidHostname as __isValidHostname, +} from "@aws-sdk/protocol-http"; +import { + expectBoolean as __expectBoolean, + expectNonNull as __expectNonNull, + expectObject as __expectObject, + expectString as __expectString, + extendedEncodeURIComponent as __extendedEncodeURIComponent, + LazyJsonString as __LazyJsonString, + limitedParseDouble as __limitedParseDouble, + serializeFloat as __serializeFloat, + strictParseInt32 as __strictParseInt32, +} from "@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + MetadataBearer as __MetadataBearer, + ResponseMetadata as __ResponseMetadata, + SerdeContext as __SerdeContext, + SmithyException as __SmithyException, +} from "@aws-sdk/types"; + +import { CreateAppMonitorCommandInput, CreateAppMonitorCommandOutput } from "../commands/CreateAppMonitorCommand"; +import { DeleteAppMonitorCommandInput, DeleteAppMonitorCommandOutput } from "../commands/DeleteAppMonitorCommand"; +import { GetAppMonitorCommandInput, GetAppMonitorCommandOutput } from "../commands/GetAppMonitorCommand"; +import { GetAppMonitorDataCommandInput, GetAppMonitorDataCommandOutput } from "../commands/GetAppMonitorDataCommand"; +import { ListAppMonitorsCommandInput, ListAppMonitorsCommandOutput } from "../commands/ListAppMonitorsCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { 
PutRumEventsCommandInput, PutRumEventsCommandOutput } from "../commands/PutRumEventsCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { UpdateAppMonitorCommandInput, UpdateAppMonitorCommandOutput } from "../commands/UpdateAppMonitorCommand"; +import { + AccessDeniedException, + AppMonitor, + AppMonitorConfiguration, + AppMonitorDetails, + AppMonitorSummary, + ConflictException, + CwLog, + DataStorage, + InternalServerException, + QueryFilter, + ResourceNotFoundException, + RumEvent, + ServiceQuotaExceededException, + Telemetry, + ThrottlingException, + TimeRange, + UserDetails, + ValidationException, +} from "../models/models_0"; + +export const serializeAws_restJson1CreateAppMonitorCommand = async ( + input: CreateAppMonitorCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/appmonitor"; + let body: any; + body = JSON.stringify({ + ...(input.AppMonitorConfiguration !== undefined && + input.AppMonitorConfiguration !== null && { + AppMonitorConfiguration: serializeAws_restJson1AppMonitorConfiguration(input.AppMonitorConfiguration, context), + }), + ...(input.CwLogEnabled !== undefined && input.CwLogEnabled !== null && { CwLogEnabled: input.CwLogEnabled }), + ...(input.Domain !== undefined && input.Domain !== null && { Domain: input.Domain }), + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_restJson1TagMap(input.Tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteAppMonitorCommand = async ( + input: DeleteAppMonitorCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/appmonitor/{Name}"; + if (input.Name !== undefined) { + const labelValue: string = input.Name; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Name."); + } + resolvedPath = resolvedPath.replace("{Name}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Name."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetAppMonitorCommand = async ( + input: GetAppMonitorCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/appmonitor/{Name}"; + if (input.Name !== undefined) { + const labelValue: string = input.Name; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Name."); + } + resolvedPath = resolvedPath.replace("{Name}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Name."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetAppMonitorDataCommand = async ( + input: GetAppMonitorDataCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/appmonitor/{Name}/data"; + if (input.Name !== undefined) { + const labelValue: string = input.Name; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Name."); + } + resolvedPath = resolvedPath.replace("{Name}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Name."); + } + let body: any; + body = JSON.stringify({ + ...(input.Filters !== undefined && + input.Filters !== null && { Filters: serializeAws_restJson1QueryFilters(input.Filters, context) }), + ...(input.MaxResults !== undefined && input.MaxResults !== null && { MaxResults: input.MaxResults }), + ...(input.NextToken !== undefined && input.NextToken !== null && { NextToken: input.NextToken }), + ...(input.TimeRange !== undefined && + input.TimeRange !== null && { TimeRange: serializeAws_restJson1TimeRange(input.TimeRange, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListAppMonitorsCommand = async ( + input: ListAppMonitorsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/appmonitors"; + const query: any = { + ...(input.MaxResults !== undefined && { maxResults: input.MaxResults.toString() }), + ...(input.NextToken !== undefined && { nextToken: input.NextToken }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListTagsForResourceCommand = async ( + input: ListTagsForResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/tags/{ResourceArn}"; + if (input.ResourceArn !== undefined) { + const labelValue: string = input.ResourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ResourceArn."); + } + resolvedPath = resolvedPath.replace("{ResourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ResourceArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1PutRumEventsCommand = async ( + input: PutRumEventsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/appmonitors/{Id}"; + if (input.Id !== undefined) { + const labelValue: string = input.Id; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Id."); + } + resolvedPath = resolvedPath.replace("{Id}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Id."); + } + let body: any; + body = JSON.stringify({ + ...(input.AppMonitorDetails !== undefined && + input.AppMonitorDetails !== null && { + AppMonitorDetails: serializeAws_restJson1AppMonitorDetails(input.AppMonitorDetails, context), + }), + ...(input.BatchId !== undefined && input.BatchId !== null && { BatchId: input.BatchId }), + ...(input.RumEvents !== undefined && + input.RumEvents !== null && { RumEvents: serializeAws_restJson1RumEventList(input.RumEvents, context) }), + ...(input.UserDetails !== undefined && + input.UserDetails !== null && { UserDetails: serializeAws_restJson1UserDetails(input.UserDetails, context) }), + }); + let { hostname: resolvedHostname } = await context.endpoint(); + if (context.disableHostPrefix !== true) { + resolvedHostname = "dataplane." + resolvedHostname; + if (!__isValidHostname(resolvedHostname)) { + throw new Error("ValidationError: prefixed hostname must be hostname compatible."); + } + } + return new __HttpRequest({ + protocol, + hostname: resolvedHostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1TagResourceCommand = async ( + input: TagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/tags/{ResourceArn}"; + if (input.ResourceArn !== undefined) { + const labelValue: string = input.ResourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ResourceArn."); + } + resolvedPath = resolvedPath.replace("{ResourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ResourceArn."); + } + let body: any; + body = JSON.stringify({ + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_restJson1TagMap(input.Tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UntagResourceCommand = async ( + input: UntagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags/{ResourceArn}"; + if (input.ResourceArn !== undefined) { + const labelValue: string = input.ResourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ResourceArn."); + } + resolvedPath = resolvedPath.replace("{ResourceArn}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ResourceArn."); + } + const query: any = { + ...(input.TagKeys !== undefined && { tagKeys: (input.TagKeys || []).map((_entry) => _entry as any) }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1UpdateAppMonitorCommand = async ( + input: UpdateAppMonitorCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/appmonitor/{Name}"; + if (input.Name !== undefined) { + const labelValue: string = input.Name; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: Name."); + } + resolvedPath = resolvedPath.replace("{Name}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: Name."); + } + let body: any; + body = JSON.stringify({ + ...(input.AppMonitorConfiguration !== undefined && + input.AppMonitorConfiguration !== null && { + AppMonitorConfiguration: serializeAws_restJson1AppMonitorConfiguration(input.AppMonitorConfiguration, context), + }), + ...(input.CwLogEnabled !== undefined && input.CwLogEnabled !== null && { CwLogEnabled: input.CwLogEnabled }), + ...(input.Domain !== undefined && input.Domain !== null && { Domain: input.Domain }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const deserializeAws_restJson1CreateAppMonitorCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateAppMonitorCommandError(output, context); + } + const contents: CreateAppMonitorCommandOutput = { + $metadata: deserializeMetadata(output), + Id: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Id !== undefined && data.Id !== null) { + contents.Id = __expectString(data.Id); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateAppMonitorCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rum#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.rum#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.rum#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.rum#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await 
deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteAppMonitorCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteAppMonitorCommandError(output, context); + } + const contents: DeleteAppMonitorCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteAppMonitorCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rum#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.rum#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rum#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.rum#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = 
message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetAppMonitorCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetAppMonitorCommandError(output, context); + } + const contents: GetAppMonitorCommandOutput = { + $metadata: deserializeMetadata(output), + AppMonitor: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.AppMonitor !== undefined && data.AppMonitor !== null) { + contents.AppMonitor = deserializeAws_restJson1AppMonitor(data.AppMonitor, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetAppMonitorCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rum#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rum#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.rum#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetAppMonitorDataCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetAppMonitorDataCommandError(output, context); + } + const contents: GetAppMonitorDataCommandOutput = { + $metadata: deserializeMetadata(output), + Events: undefined, + NextToken: undefined, + }; + const data: { 
[key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Events !== undefined && data.Events !== null) { + contents.Events = deserializeAws_restJson1EventDataList(data.Events, context); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetAppMonitorDataCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rum#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rum#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.rum#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListAppMonitorsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListAppMonitorsCommandError(output, context); + } + const contents: ListAppMonitorsCommandOutput = { + $metadata: deserializeMetadata(output), + AppMonitorSummaries: undefined, + NextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.AppMonitorSummaries !== undefined && data.AppMonitorSummaries !== null) { + contents.AppMonitorSummaries = deserializeAws_restJson1AppMonitorSummaryList(data.AppMonitorSummaries, context); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = 
__expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListAppMonitorsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rum#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.rum#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTagsForResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTagsForResourceCommandError(output, context); + } + const contents: ListTagsForResourceCommandOutput = { + $metadata: deserializeMetadata(output), + ResourceArn: undefined, + Tags: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.ResourceArn !== undefined && data.ResourceArn !== null) { + contents.ResourceArn = __expectString(data.ResourceArn); + } + if (data.Tags !== undefined && data.Tags !== null) { + contents.Tags = deserializeAws_restJson1TagMap(data.Tags, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTagsForResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await 
deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rum#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1PutRumEventsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1PutRumEventsCommandError(output, context); + } + const contents: PutRumEventsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1PutRumEventsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rum#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rum#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.rum#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + 
...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1TagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1TagResourceCommandError(output, context); + } + const contents: TagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1TagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rum#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UntagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UntagResourceCommandError(output, context); + } + const contents: UntagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UntagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "InternalServerException": + case 
"com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rum#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateAppMonitorCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateAppMonitorCommandError(output, context); + } + const contents: UpdateAppMonitorCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateAppMonitorCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.rum#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.rum#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.rum#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.rum#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.rum#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case 
"com.amazonaws.rum#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: AccessDeniedException = { + name: "AccessDeniedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ConflictExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ConflictException = { + name: "ConflictException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + resourceName: undefined, + resourceType: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.resourceName !== undefined && data.resourceName !== null) { + contents.resourceName = __expectString(data.resourceName); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + return contents; +}; + +const deserializeAws_restJson1InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: InternalServerException = { + name: "InternalServerException", + $fault: "server", + $retryable: {}, + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + retryAfterSeconds: undefined, + }; + if (parsedOutput.headers["retry-after"] !== undefined) { + contents.retryAfterSeconds = __strictParseInt32(parsedOutput.headers["retry-after"]); + } + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ResourceNotFoundException = { + name: "ResourceNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + resourceName: undefined, + resourceType: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.resourceName !== undefined && data.resourceName !== null) { + contents.resourceName = __expectString(data.resourceName); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + return contents; +}; + +const 
deserializeAws_restJson1ServiceQuotaExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ServiceQuotaExceededException = { + name: "ServiceQuotaExceededException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ThrottlingExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ThrottlingException = { + name: "ThrottlingException", + $fault: "client", + $retryable: { + throttling: true, + }, + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + quotaCode: undefined, + retryAfterSeconds: undefined, + serviceCode: undefined, + }; + if (parsedOutput.headers["retry-after"] !== undefined) { + contents.retryAfterSeconds = __strictParseInt32(parsedOutput.headers["retry-after"]); + } + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.quotaCode !== undefined && data.quotaCode !== null) { + contents.quotaCode = __expectString(data.quotaCode); + } + if (data.serviceCode !== undefined && data.serviceCode !== null) { + contents.serviceCode = __expectString(data.serviceCode); + } + return contents; +}; + +const deserializeAws_restJson1ValidationExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ValidationException = { + name: "ValidationException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const serializeAws_restJson1AppMonitorConfiguration = ( + input: AppMonitorConfiguration, + context: __SerdeContext +): any => { + return { + ...(input.AllowCookies !== undefined && input.AllowCookies !== null && { AllowCookies: input.AllowCookies }), + ...(input.EnableXRay !== undefined && input.EnableXRay !== null && { EnableXRay: input.EnableXRay }), + ...(input.ExcludedPages !== undefined && + input.ExcludedPages !== null && { ExcludedPages: serializeAws_restJson1Pages(input.ExcludedPages, context) }), + ...(input.FavoritePages !== undefined && + input.FavoritePages !== null && { + FavoritePages: serializeAws_restJson1FavoritePages(input.FavoritePages, context), + }), + ...(input.GuestRoleArn !== undefined && input.GuestRoleArn !== null && { GuestRoleArn: input.GuestRoleArn }), + ...(input.IdentityPoolId !== undefined && + input.IdentityPoolId !== null && { IdentityPoolId: input.IdentityPoolId }), + ...(input.IncludedPages !== undefined && + input.IncludedPages !== null && { IncludedPages: serializeAws_restJson1Pages(input.IncludedPages, context) }), + ...(input.SessionSampleRate !== undefined && + input.SessionSampleRate !== null && { SessionSampleRate: __serializeFloat(input.SessionSampleRate) }), + ...(input.Telemetries !== undefined && + input.Telemetries !== null && { Telemetries: serializeAws_restJson1Telemetries(input.Telemetries, context) }), + }; +}; + +const serializeAws_restJson1AppMonitorDetails = (input: AppMonitorDetails, context: __SerdeContext): any => { + return { + ...(input.id !== undefined && input.id !== 
null && { id: input.id }), + ...(input.name !== undefined && input.name !== null && { name: input.name }), + ...(input.version !== undefined && input.version !== null && { version: input.version }), + }; +}; + +const serializeAws_restJson1FavoritePages = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1Pages = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1QueryFilter = (input: QueryFilter, context: __SerdeContext): any => { + return { + ...(input.Name !== undefined && input.Name !== null && { Name: input.Name }), + ...(input.Values !== undefined && + input.Values !== null && { Values: serializeAws_restJson1QueryFilterValueList(input.Values, context) }), + }; +}; + +const serializeAws_restJson1QueryFilters = (input: QueryFilter[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1QueryFilter(entry, context); + }); +}; + +const serializeAws_restJson1QueryFilterValueList = (input: string[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1RumEvent = (input: RumEvent, context: __SerdeContext): any => { + return { + ...(input.details !== undefined && + input.details !== null && { details: __LazyJsonString.fromObject(input.details) }), + ...(input.id !== undefined && input.id !== null && { id: input.id }), + ...(input.metadata !== undefined && + input.metadata !== null && { metadata: __LazyJsonString.fromObject(input.metadata) }), + ...(input.timestamp !== undefined && + input.timestamp !== null && { timestamp: Math.round(input.timestamp.getTime() / 1000) }), + ...(input.type !== undefined && input.type !== null && { type: input.type }), + }; +}; + +const serializeAws_restJson1RumEventList = (input: RumEvent[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return serializeAws_restJson1RumEvent(entry, context); + }); +}; + +const serializeAws_restJson1TagMap = (input: { [key: string]: string }, context: __SerdeContext): any => { + return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: value, + }; + }, {}); +}; + +const serializeAws_restJson1Telemetries = (input: (Telemetry | string)[], context: __SerdeContext): any => { + return input + .filter((e: any) => e != null) + .map((entry) => { + if (entry === null) { + return null as any; + } + return entry; + }); +}; + +const serializeAws_restJson1TimeRange = (input: TimeRange, context: __SerdeContext): any => { + return { + ...(input.After !== undefined && input.After !== null && { After: input.After }), + ...(input.Before !== undefined && input.Before !== null && { Before: input.Before }), + }; +}; + +const serializeAws_restJson1UserDetails = (input: UserDetails, context: __SerdeContext): any => { + return { + ...(input.sessionId !== undefined && input.sessionId !== null && { 
sessionId: input.sessionId }), + ...(input.userId !== undefined && input.userId !== null && { userId: input.userId }), + }; +}; + +const deserializeAws_restJson1AppMonitor = (output: any, context: __SerdeContext): AppMonitor => { + return { + AppMonitorConfiguration: + output.AppMonitorConfiguration !== undefined && output.AppMonitorConfiguration !== null + ? deserializeAws_restJson1AppMonitorConfiguration(output.AppMonitorConfiguration, context) + : undefined, + Created: __expectString(output.Created), + DataStorage: + output.DataStorage !== undefined && output.DataStorage !== null + ? deserializeAws_restJson1DataStorage(output.DataStorage, context) + : undefined, + Domain: __expectString(output.Domain), + Id: __expectString(output.Id), + LastModified: __expectString(output.LastModified), + Name: __expectString(output.Name), + State: __expectString(output.State), + Tags: + output.Tags !== undefined && output.Tags !== null + ? deserializeAws_restJson1TagMap(output.Tags, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1AppMonitorConfiguration = ( + output: any, + context: __SerdeContext +): AppMonitorConfiguration => { + return { + AllowCookies: __expectBoolean(output.AllowCookies), + EnableXRay: __expectBoolean(output.EnableXRay), + ExcludedPages: + output.ExcludedPages !== undefined && output.ExcludedPages !== null + ? deserializeAws_restJson1Pages(output.ExcludedPages, context) + : undefined, + FavoritePages: + output.FavoritePages !== undefined && output.FavoritePages !== null + ? deserializeAws_restJson1FavoritePages(output.FavoritePages, context) + : undefined, + GuestRoleArn: __expectString(output.GuestRoleArn), + IdentityPoolId: __expectString(output.IdentityPoolId), + IncludedPages: + output.IncludedPages !== undefined && output.IncludedPages !== null + ? deserializeAws_restJson1Pages(output.IncludedPages, context) + : undefined, + SessionSampleRate: __limitedParseDouble(output.SessionSampleRate), + Telemetries: + output.Telemetries !== undefined && output.Telemetries !== null + ? deserializeAws_restJson1Telemetries(output.Telemetries, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1AppMonitorSummary = (output: any, context: __SerdeContext): AppMonitorSummary => { + return { + Created: __expectString(output.Created), + Id: __expectString(output.Id), + LastModified: __expectString(output.LastModified), + Name: __expectString(output.Name), + State: __expectString(output.State), + } as any; +}; + +const deserializeAws_restJson1AppMonitorSummaryList = (output: any, context: __SerdeContext): AppMonitorSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1AppMonitorSummary(entry, context); + }); +}; + +const deserializeAws_restJson1CwLog = (output: any, context: __SerdeContext): CwLog => { + return { + CwLogEnabled: __expectBoolean(output.CwLogEnabled), + CwLogGroup: __expectString(output.CwLogGroup), + } as any; +}; + +const deserializeAws_restJson1DataStorage = (output: any, context: __SerdeContext): DataStorage => { + return { + CwLog: + output.CwLog !== undefined && output.CwLog !== null + ? 
deserializeAws_restJson1CwLog(output.CwLog, context) + : undefined, + } as any; +}; + +const deserializeAws_restJson1EventDataList = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1FavoritePages = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1Pages = (output: any, context: __SerdeContext): string[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeAws_restJson1TagMap = (output: any, context: __SerdeContext): { [key: string]: string } => { + return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => { + if (value === null) { + return acc; + } + return { + ...acc, + [key]: __expectString(value) as any, + }; + }, {}); +}; + +const deserializeAws_restJson1Telemetries = (output: any, context: __SerdeContext): (Telemetry | string)[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return __expectString(entry) as any; + }); +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. +const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. +const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const isSerializableHeaderValue = (value: any): boolean => + value !== undefined && + value !== null && + value !== "" && + (!Object.getOwnPropertyNames(value).includes("length") || value.length != 0) && + (!Object.getOwnPropertyNames(value).includes("size") || value.size != 0); + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. 
+ */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-rum/src/runtimeConfig.browser.ts b/clients/client-rum/src/runtimeConfig.browser.ts new file mode 100644 index 000000000000..6cda536923e0 --- /dev/null +++ b/clients/client-rum/src/runtimeConfig.browser.ts @@ -0,0 +1,44 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { Sha256 } from "@aws-crypto/sha256-browser"; +import { DEFAULT_USE_DUALSTACK_ENDPOINT, DEFAULT_USE_FIPS_ENDPOINT } from "@aws-sdk/config-resolver"; +import { FetchHttpHandler, streamCollector } from "@aws-sdk/fetch-http-handler"; +import { invalidProvider } from "@aws-sdk/invalid-dependency"; +import { DEFAULT_MAX_ATTEMPTS, DEFAULT_RETRY_MODE } from "@aws-sdk/middleware-retry"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-browser"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-browser"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-browser"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-browser"; +import { RUMClientConfig } from "./RUMClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: RUMClientConfig) => { + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "browser", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? ((_: unknown) => () => Promise.reject(new Error("Credential is missing"))), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? DEFAULT_MAX_ATTEMPTS, + region: config?.region ?? invalidProvider("Region is missing"), + requestHandler: config?.requestHandler ?? new FetchHttpHandler(), + retryMode: config?.retryMode ?? (() => Promise.resolve(DEFAULT_RETRY_MODE)), + sha256: config?.sha256 ?? Sha256, + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? (() => Promise.resolve(DEFAULT_USE_DUALSTACK_ENDPOINT)), + useFipsEndpoint: config?.useFipsEndpoint ?? (() => Promise.resolve(DEFAULT_USE_FIPS_ENDPOINT)), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
toUtf8, + }; +}; diff --git a/clients/client-rum/src/runtimeConfig.native.ts b/clients/client-rum/src/runtimeConfig.native.ts new file mode 100644 index 000000000000..5847108ed17d --- /dev/null +++ b/clients/client-rum/src/runtimeConfig.native.ts @@ -0,0 +1,17 @@ +import { Sha256 } from "@aws-crypto/sha256-js"; + +import { RUMClientConfig } from "./RUMClient"; +import { getRuntimeConfig as getBrowserRuntimeConfig } from "./runtimeConfig.browser"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: RUMClientConfig) => { + const browserDefaults = getBrowserRuntimeConfig(config); + return { + ...browserDefaults, + ...config, + runtime: "react-native", + sha256: config?.sha256 ?? Sha256, + }; +}; diff --git a/clients/client-rum/src/runtimeConfig.shared.ts b/clients/client-rum/src/runtimeConfig.shared.ts new file mode 100644 index 000000000000..7c81124b3fe5 --- /dev/null +++ b/clients/client-rum/src/runtimeConfig.shared.ts @@ -0,0 +1,17 @@ +import { Logger as __Logger } from "@aws-sdk/types"; +import { parseUrl } from "@aws-sdk/url-parser"; + +import { defaultRegionInfoProvider } from "./endpoints"; +import { RUMClientConfig } from "./RUMClient"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: RUMClientConfig) => ({ + apiVersion: "2018-05-10", + disableHostPrefix: config?.disableHostPrefix ?? false, + logger: config?.logger ?? ({} as __Logger), + regionInfoProvider: config?.regionInfoProvider ?? defaultRegionInfoProvider, + serviceId: config?.serviceId ?? "RUM", + urlParser: config?.urlParser ?? parseUrl, +}); diff --git a/clients/client-rum/src/runtimeConfig.ts b/clients/client-rum/src/runtimeConfig.ts new file mode 100644 index 000000000000..0ab614992e42 --- /dev/null +++ b/clients/client-rum/src/runtimeConfig.ts @@ -0,0 +1,53 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { decorateDefaultCredentialProvider } from "@aws-sdk/client-sts"; +import { + NODE_REGION_CONFIG_FILE_OPTIONS, + NODE_REGION_CONFIG_OPTIONS, + NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS, + NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS, +} from "@aws-sdk/config-resolver"; +import { defaultProvider as credentialDefaultProvider } from "@aws-sdk/credential-provider-node"; +import { Hash } from "@aws-sdk/hash-node"; +import { NODE_MAX_ATTEMPT_CONFIG_OPTIONS, NODE_RETRY_MODE_CONFIG_OPTIONS } from "@aws-sdk/middleware-retry"; +import { loadConfig as loadNodeConfig } from "@aws-sdk/node-config-provider"; +import { NodeHttpHandler, streamCollector } from "@aws-sdk/node-http-handler"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-node"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-node"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-node"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-node"; +import { RUMClientConfig } from "./RUMClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { emitWarningIfUnsupportedVersion } from "@aws-sdk/smithy-client"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: RUMClientConfig) => { + emitWarningIfUnsupportedVersion(process.version); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "node", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? 
calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? decorateDefaultCredentialProvider(credentialDefaultProvider), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? loadNodeConfig(NODE_MAX_ATTEMPT_CONFIG_OPTIONS), + region: config?.region ?? loadNodeConfig(NODE_REGION_CONFIG_OPTIONS, NODE_REGION_CONFIG_FILE_OPTIONS), + requestHandler: config?.requestHandler ?? new NodeHttpHandler(), + retryMode: config?.retryMode ?? loadNodeConfig(NODE_RETRY_MODE_CONFIG_OPTIONS), + sha256: config?.sha256 ?? Hash.bind(null, "sha256"), + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? loadNodeConfig(NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS), + useFipsEndpoint: config?.useFipsEndpoint ?? loadNodeConfig(NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? toUtf8, + }; +}; diff --git a/clients/client-rum/tsconfig.es.json b/clients/client-rum/tsconfig.es.json new file mode 100644 index 000000000000..4c72364cd1a0 --- /dev/null +++ b/clients/client-rum/tsconfig.es.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "es5", + "module": "esnext", + "moduleResolution": "node", + "lib": ["dom", "es5", "es2015.promise", "es2015.collection", "es2015.iterable", "es2015.symbol.wellknown"], + "outDir": "dist-es" + } +} diff --git a/clients/client-rum/tsconfig.json b/clients/client-rum/tsconfig.json new file mode 100644 index 000000000000..093039289c53 --- /dev/null +++ b/clients/client-rum/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "rootDir": "./src", + "alwaysStrict": true, + "target": "ES2018", + "module": "commonjs", + "strict": true, + "downlevelIteration": true, + "importHelpers": true, + "noEmitHelpers": true, + "incremental": true, + "resolveJsonModule": true, + "esModuleInterop": true, + "outDir": "dist-cjs", + "removeComments": true + }, + "typedocOptions": { + "exclude": ["**/node_modules/**", "**/*.spec.ts", "**/protocols/*.ts", "**/e2e/*.ts", "**/endpoints.ts"], + "excludeNotExported": true, + "excludePrivate": true, + "hideGenerator": true, + "ignoreCompilerErrors": true, + "includeDeclarations": true, + "stripInternal": true, + "readme": "README.md", + "mode": "file", + "out": "docs", + "theme": "minimal", + "plugin": ["@aws-sdk/service-client-documentation-generator"] + }, + "exclude": ["test/**/*"] +} diff --git a/clients/client-rum/tsconfig.types.json b/clients/client-rum/tsconfig.types.json new file mode 100644 index 000000000000..4c3dfa7b3d25 --- /dev/null +++ b/clients/client-rum/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "removeComments": false, + "declaration": true, + "declarationDir": "dist-types", + "emitDeclarationOnly": true + }, + "exclude": ["test/**/*", "dist-types/**/*"] +} diff --git a/clients/client-s3/src/S3.ts b/clients/client-s3/src/S3.ts index ff027da07e50..4b7f7e6317b9 100644 --- a/clients/client-s3/src/S3.ts +++ b/clients/client-s3/src/S3.ts @@ -778,7 +778,17 @@ export class S3 extends S3Client { * defined by Amazon S3. These permissions are then added to the ACL on the object. For more * information, see Access Control List (ACL) Overview and Managing ACLs Using the REST * API.
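Editor's note: the client-rum files above (REST-JSON deserializers, runtime configs, tsconfigs) reject failed calls with an Error whose name carries the modeled error code, and the Node runtime config supplies default credential, retry, and endpoint providers. A minimal usage sketch follows, assuming the client is published as @aws-sdk/client-rum with the command and shape names shown in this patch; the region and monitor name are placeholders.

import { GetAppMonitorCommand, RUMClient } from "@aws-sdk/client-rum"; // assumed package name

const rum = new RUMClient({ region: "us-east-1" }); // Node runtime config fills in the remaining defaults

async function describeMonitor(name: string): Promise<void> {
  try {
    const { AppMonitor } = await rum.send(new GetAppMonitorCommand({ Name: name }));
    console.log("state:", AppMonitor?.State, "domain:", AppMonitor?.Domain);
  } catch (err: any) {
    // The generated deserializers reject with an Error whose name is the modeled error code.
    if (err?.name === "ResourceNotFoundException") {
      console.warn(`app monitor ${name} was not found`);
      return;
    }
    throw err;
  }
}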

                                  - * + *

                                  If the bucket that you're copying objects to uses the bucket owner enforced setting for + * S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that + * use this setting only accept PUT requests that don't specify an ACL or PUT requests that + * specify bucket owner full control ACLs, such as the bucket-owner-full-control canned + * ACL or an equivalent form of this ACL expressed in the XML format.

                                  + *

                                  For more information, see Controlling ownership of + * objects and disabling ACLs in the Amazon S3 User Guide.

                                  + * + *

                                  If your bucket uses the bucket owner enforced setting for Object Ownership, + * all objects written to the bucket by any account will be owned by the bucket owner.

                                  + *
                                  *
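Editor's note: since the note above says a destination bucket with the bucket owner enforced Object Ownership setting only accepts copies that either omit the ACL or pass bucket owner full control, here is a hedged CopyObject sketch; the bucket and key names are placeholders.

import { CopyObjectCommand, S3Client } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" });

async function copyIntoOwnerEnforcedBucket(): Promise<void> {
  await s3.send(
    new CopyObjectCommand({
      Bucket: "destination-bucket", // placeholder
      Key: "reports/2021/report.csv", // placeholder
      CopySource: "source-bucket/reports/report.csv", // placeholder
      ACL: "bucket-owner-full-control", // the one canned ACL such buckets accept; omitting ACL also works
    })
  );
}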

                                  * Storage Class Options *

                                  @@ -861,9 +871,20 @@ export class S3 extends S3Client { * bucket in a Region other than US East (N. Virginia), your application must be able to * handle 307 redirect. For more information, see Virtual hosting of buckets.

                                  * - *

                                  When creating a bucket using this operation, you can optionally specify the accounts or - * groups that should be granted specific permissions on the bucket. There are two ways to - * grant the appropriate permissions using the request headers.

                                  + *

                                  + * Access control lists (ACLs) + *

                                  + *

                                  When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or + * groups that should be granted specific permissions on the bucket.

                                  + * + *

                                  If your CreateBucket request includes the BucketOwnerEnforced value for + * the x-amz-object-ownership header, your request can either not specify + * an ACL or specify bucket owner full control ACLs, such as the bucket-owner-full-control + * canned ACL or an equivalent ACL expressed in the XML format. For + * more information, see Controlling object + * ownership in the Amazon S3 User Guide.

                                  + *
                                  + *

                                  There are two ways to grant the appropriate permissions using the request headers.

                                  *
                                    *
                                  • *

                                    Specify a canned ACL using the x-amz-acl request header. Amazon S3 @@ -876,7 +897,7 @@ export class S3 extends S3Client { * x-amz-grant-write, x-amz-grant-read-acp, * x-amz-grant-write-acp, and x-amz-grant-full-control * headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For - * more information, see Access control list + * more information, see Access control list * (ACL) overview.

                                    *

                                    You specify each grantee as a type=value pair, where the type is one of the * following:

                                    @@ -940,13 +961,30 @@ export class S3 extends S3Client { *

                                    * Permissions *

                                    - *

                                    If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, - * authenticated-read, or if you specify access permissions explicitly through any other ACL, both - * s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL the - * CreateBucket request is private, only s3:CreateBucket permission is needed.

                                    - *

                                    If ObjectLockEnabledForBucket is set to true in your CreateBucket request, - * s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.

                                    - * + *

In addition to s3:CreateBucket, the following permissions are required when your CreateBucket request includes specific headers:

                                    + *
                                      + *
                                    • + *

+ * ACLs - If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, + * authenticated-read, or if you specify access permissions explicitly through any other ACL, both + * s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL for the + * CreateBucket request is private or doesn't specify any ACLs, only s3:CreateBucket permission is needed.

                                      + *
                                    • + *
                                    • + *

                                      + * Object Lock - If + * ObjectLockEnabledForBucket is set to true in your + * CreateBucket request, + * s3:PutBucketObjectLockConfiguration and + * s3:PutBucketVersioning permissions are required.

                                      + *
                                    • + *
                                    • + *

+ * S3 Object Ownership - If your CreateBucket + * request includes the x-amz-object-ownership header, + * s3:PutBucketOwnershipControls permission is required.

                                      + *
                                    • + *
                                    *
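Editor's note: a hedged sketch of a CreateBucket call that exercises the permission requirements listed above. ObjectOwnership maps to the x-amz-object-ownership header (the member this update introduces) and ObjectLockEnabledForBucket to the Object Lock header; the bucket name and region are placeholders.

import { CreateBucketCommand, S3Client } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-west-2" });

async function createOwnerEnforcedBucket(name: string): Promise<void> {
  await s3.send(
    new CreateBucketCommand({
      Bucket: name,
      // Sets x-amz-object-ownership; needs s3:PutBucketOwnershipControls in addition to s3:CreateBucket.
      ObjectOwnership: "BucketOwnerEnforced",
      // Enables Object Lock; needs s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning.
      ObjectLockEnabledForBucket: true,
      CreateBucketConfiguration: { LocationConstraint: "us-west-2" },
    })
  );
}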

                                    The following operations are related to CreateBucket:

                                    *
                                      *
                                    • @@ -1493,8 +1531,8 @@ export class S3 extends S3Client { /** *

                                      Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

                                      - *

                                      The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

                                      - *

                                      The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                      + *

                                      The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

                                      + *

                                      The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                      *

                                      For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

                                      *

                                      Operations related to * DeleteBucketIntelligentTieringConfiguration include:

                                      @@ -2333,6 +2371,13 @@ export class S3 extends S3Client { * return the ACL of the bucket, you must have READ_ACP access to the bucket. If * READ_ACP permission is granted to the anonymous user, you can return the * ACL of the bucket without using an authorization header.

                                      + * + *

                                      If your bucket uses the bucket owner enforced setting for S3 Object Ownership, + * requests to read ACLs are still supported and return the bucket-owner-full-control + * ACL with the owner being the account that created the bucket. For more information, see + * + * Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

                                      + *
                                      * *
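Editor's note: a short sketch of reading a bucket ACL under the bucket owner enforced setting, where the call still succeeds and simply reports the bucket owner with full control; the bucket name is a placeholder.

import { GetBucketAclCommand, S3Client } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function printBucketAcl(bucket: string): Promise<void> {
  const { Owner, Grants } = await s3.send(new GetBucketAclCommand({ Bucket: bucket }));
  console.log("owner:", Owner?.DisplayName ?? Owner?.ID);
  for (const grant of Grants ?? []) {
    // With ACLs disabled, the only grant returned is FULL_CONTROL for the bucket owner.
    console.log(`${grant.Grantee?.Type ?? "unknown"} -> ${grant.Permission}`);
  }
}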

                                      * Related Resources @@ -2539,8 +2584,8 @@ export class S3 extends S3Client { /** *

                                      Gets the S3 Intelligent-Tiering configuration from the specified bucket.

                                      - *

                                      The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

                                      - *

                                      The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                      + *

                                      The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

                                      + *

                                      The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                      *

                                      For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

                                      *

                                      Operations related to * GetBucketIntelligentTieringConfiguration include:

                                      @@ -2963,9 +3008,9 @@ export class S3 extends S3Client { /** *

                                      Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you * must have the s3:GetBucketOwnershipControls permission. For more information - * about Amazon S3 permissions, see Specifying - * Permissions in a Policy.

                                      - *

                                      For information about Amazon S3 Object Ownership, see Using Object Ownership.

                                      + * about Amazon S3 permissions, see Specifying + * permissions in a policy.

                                      + *

                                      For information about Amazon S3 Object Ownership, see Using Object Ownership.

                                      *

                                      The following operations are related to GetBucketOwnershipControls:

                                      *
                                        *
                                      • @@ -3626,7 +3671,13 @@ export class S3 extends S3Client { *

                                        *

                                        By default, GET returns ACL information about the current version of an object. To * return ACL information about a different version, use the versionId subresource.

                                        - * + * + *

                                        If your bucket uses the bucket owner enforced setting for S3 Object Ownership, + * requests to read ACLs are still supported and return the bucket-owner-full-control + * ACL with the owner being the account that created the bucket. For more information, see + * + * Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

                                        + *
                                        *
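Editor's note: as described above, GetObjectAcl reads the ACL of the current object version unless a versionId is supplied; a hedged sketch with a placeholder bucket, key, and optional version follows.

import { GetObjectAclCommand, S3Client } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function printObjectAcl(bucket: string, key: string, versionId?: string): Promise<void> {
  const { Owner, Grants } = await s3.send(
    new GetObjectAclCommand({ Bucket: bucket, Key: key, VersionId: versionId })
  );
  console.log("owner:", Owner?.DisplayName ?? Owner?.ID);
  for (const grant of Grants ?? []) {
    console.log(`${grant.Grantee?.Type ?? "unknown"} -> ${grant.Permission}`);
  }
}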

                                        The following operations are related to GetObjectAcl:

                                        *
                                          *
@@ -4202,8 +4253,8 @@ export class S3 extends S3Client {
  /**
   * Lists the S3 Intelligent-Tiering configuration from the specified bucket.
-  * The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
-  * The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+  * The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
+  * The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
   *
   * For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
   *
   * Operations related to ListBucketIntelligentTieringConfigurations include:

@@ -4884,7 +4935,13 @@ export class S3 extends S3Client {
   * that updates a bucket ACL using the request body, then you can continue to use that
   * approach.
-  *
+  *
+  * If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
+  * You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and
+  * return the AccessControlListNotSupported error code. Requests to read ACLs are still supported.
+  * For more information, see Controlling object ownership
+  * in the Amazon S3 User Guide.
+  *
   *
   * Access Permissions
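The failure mode added to these docs can be reproduced with a short sketch like the one below (illustrative; the bucket name is a placeholder, and the error-name check is an assumption about how the service error surfaces in the SDK):

```ts
import { S3Client, PutBucketAclCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function tryPutBucketAcl(): Promise<void> {
  try {
    await s3.send(new PutBucketAclCommand({ Bucket: "example-bucket", ACL: "public-read" }));
  } catch (err) {
    // On a bucket that uses the bucket owner enforced setting this call is expected to fail;
    // the service reports the AccessControlListNotSupported error code.
    console.error((err as Error).name, (err as Error).message);
  }
}

tryPutBucketAcl().catch(console.error);
```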

@@ -5379,8 +5436,8 @@ export class S3 extends S3Client {
  /**
   * Puts a S3 Intelligent-Tiering configuration to the specified bucket.
   * You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.
-  * The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
-  * The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+  * The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
+  * The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
   *
   * For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
   *
   * Operations related to PutBucketIntelligentTieringConfiguration include:
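For context, a minimal sketch of writing an Intelligent-Tiering configuration with the v3 client (illustrative; bucket name, configuration ID, and day thresholds are placeholders):

```ts
import { S3Client, PutBucketIntelligentTieringConfigurationCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function putTieringConfig(): Promise<void> {
  await s3.send(
    new PutBucketIntelligentTieringConfigurationCommand({
      Bucket: "example-bucket",
      Id: "example-config",
      IntelligentTieringConfiguration: {
        Id: "example-config",
        Status: "Enabled",
        // Opt in to the archive tiers for data that can wait minutes to hours on retrieval.
        Tierings: [
          { Days: 90, AccessTier: "ARCHIVE_ACCESS" },
          { Days: 180, AccessTier: "DEEP_ARCHIVE_ACCESS" },
        ],
      },
    })
  );
}

putTieringConfig().catch(console.error);
```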

@@ -5754,10 +5811,15 @@ export class S3 extends S3Client {
   * modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the
   * source bucket. To set the logging status of a bucket, you must be the bucket owner.
   *
-  * The bucket owner is automatically granted FULL_CONTROL to all logs. You use the
-  * Grantee request element to grant access to other people. The
+  * The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The
   * Permissions request element specifies the kind of access the grantee has to
   * the logs.
+  *
+  * If the target bucket for log delivery uses the bucket owner enforced
+  * setting for S3 Object Ownership, you can't use the Grantee request element
+  * to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the
+  * Amazon S3 User Guide.
+  *
   *
   * Grantee Values
@@ -5802,7 +5864,7 @@ export class S3 extends S3Client {
   *
-  * For more information about server access logging, see Server Access Logging.
+  * For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.
   *
   * For more information about creating a bucket, see CreateBucket. For more
   * information about returning the logging status of a bucket, see GetBucketLogging.
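A minimal sketch of the policy-only logging setup this note describes, with no TargetGrants on the target bucket (illustrative; bucket names and prefix are placeholders):

```ts
import { S3Client, PutBucketLoggingCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function enableAccessLogging(): Promise<void> {
  await s3.send(
    new PutBucketLoggingCommand({
      Bucket: "example-source-bucket",
      BucketLoggingStatus: {
        LoggingEnabled: {
          TargetBucket: "example-log-bucket",
          TargetPrefix: "access-logs/",
          // No TargetGrants: with the bucket owner enforced setting, grant the log delivery
          // service access to the target bucket with a bucket policy instead.
        },
      },
    })
  );
}

enableAccessLogging().catch(console.error);
```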

@@ -6038,8 +6100,8 @@ export class S3 extends S3Client {
  /**
   * Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this
   * operation, you must have the s3:PutBucketOwnershipControls permission. For
-  * more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
-  * For information about Amazon S3 Object Ownership, see Using Object Ownership.
+  * more information about Amazon S3 permissions, see Specifying permissions in a policy.
+  * For information about Amazon S3 Object Ownership, see Using object ownership.
   *
   * The following operations are related to PutBucketOwnershipControls:
   *
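To show how the new BucketOwnerEnforced value is applied through this operation, here is a minimal sketch (illustrative; the bucket name is a placeholder):

```ts
import { S3Client, PutBucketOwnershipControlsCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function enforceBucketOwner(): Promise<void> {
  await s3.send(
    new PutBucketOwnershipControlsCommand({
      Bucket: "example-bucket",
      OwnershipControls: {
        // BucketOwnerEnforced disables ACLs; BucketOwnerPreferred and ObjectWriter remain valid values.
        Rules: [{ ObjectOwnership: "BucketOwnerEnforced" }],
      },
    })
  );
}

enforceBucketOwner().catch(console.error);
```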
@@ -6678,7 +6740,6 @@ export class S3 extends S3Client {
   *
   *
-  *
   *
   * Server-side Encryption
@@ -6700,7 +6761,20 @@ export class S3 extends S3Client {
   * permissions are then added to the ACL on the object. For more information, see Access Control List
   * (ACL) Overview and Managing ACLs Using the REST
   * API.
-  *
+  *
+  * If the bucket that you're uploading objects to uses the bucket owner enforced setting
+  * for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that
+  * use this setting only accept PUT requests that don't specify an ACL or PUT requests that
+  * specify bucket owner full control ACLs, such as the bucket-owner-full-control canned
+  * ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other
+  * ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a
+  * 400 error with the error code
+  * AccessControlListNotSupported.
+  *
+  * For more information, see Controlling ownership of
+  * objects and disabling ACLs in the Amazon S3 User Guide.
+  *
+  * If your bucket uses the bucket owner enforced setting for Object Ownership,
+  * all objects written to the bucket by any account will be owned by the bucket owner.
+  *
   *
   * Storage Class Options
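A minimal sketch of an upload that a bucket-owner-enforced bucket accepts, either omitting the ACL or sending the bucket-owner-full-control canned ACL (illustrative; names and body are placeholders):

```ts
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function uploadObject(): Promise<void> {
  await s3.send(
    new PutObjectCommand({
      Bucket: "example-bucket",
      Key: "reports/2021-11.txt",
      Body: "hello, world",
      // Optional on bucket-owner-enforced buckets; any other ACL would be rejected
      // with a 400 AccessControlListNotSupported error.
      ACL: "bucket-owner-full-control",
    })
  );
}

uploadObject().catch(console.error);
```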

@@ -6772,8 +6846,13 @@ export class S3 extends S3Client {
   * the ACL on an object using either the request body or the headers. For example, if you have
   * an existing application that updates a bucket ACL using the request body, you can continue
   * to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.
-  *
-  *
+  *
+  * If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions.
+  * You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and
+  * return the AccessControlListNotSupported error code. Requests to read ACLs are still supported.
+  * For more information, see Controlling object ownership
+  * in the Amazon S3 User Guide.
+  *
   *

   * Access Permissions
diff --git a/clients/client-s3/src/commands/CopyObjectCommand.ts b/clients/client-s3/src/commands/CopyObjectCommand.ts
index bbd00b47d602..659aa4752228 100644
--- a/clients/client-s3/src/commands/CopyObjectCommand.ts
+++ b/clients/client-s3/src/commands/CopyObjectCommand.ts
@@ -157,7 +157,17 @@ export interface CopyObjectCommandOutput extends CopyObjectOutput, __MetadataBea
  * defined by Amazon S3. These permissions are then added to the ACL on the object. For more
  * information, see Access Control List (ACL) Overview and Managing ACLs Using the REST
  * API.
- *
+ *
+ * If the bucket that you're copying objects to uses the bucket owner enforced setting for
+ * S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that
+ * use this setting only accept PUT requests that don't specify an ACL or PUT requests that
+ * specify bucket owner full control ACLs, such as the bucket-owner-full-control canned
+ * ACL or an equivalent form of this ACL expressed in the XML format.
+ *
+ * For more information, see Controlling ownership of
+ * objects and disabling ACLs in the Amazon S3 User Guide.
+ *
+ * If your bucket uses the bucket owner enforced setting for Object Ownership,
+ * all objects written to the bucket by any account will be owned by the bucket owner.
+ *
  *
  * Storage Class Options
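The same constraint applies when copying into such a bucket; a minimal sketch (illustrative; bucket and key names are placeholders):

```ts
import { S3Client, CopyObjectCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function copyObject(): Promise<void> {
  await s3.send(
    new CopyObjectCommand({
      CopySource: "example-source-bucket/reports/2021-11.txt",
      Bucket: "example-destination-bucket",
      Key: "reports/2021-11.txt",
      // Omit the ACL, or use bucket-owner-full-control, when the destination bucket
      // uses the bucket owner enforced setting.
      ACL: "bucket-owner-full-control",
    })
  );
}

copyObject().catch(console.error);
```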

diff --git a/clients/client-s3/src/commands/CreateBucketCommand.ts b/clients/client-s3/src/commands/CreateBucketCommand.ts
index b960b948ffb8..bee584ac1992 100644
--- a/clients/client-s3/src/commands/CreateBucketCommand.ts
+++ b/clients/client-s3/src/commands/CreateBucketCommand.ts
@@ -43,9 +43,20 @@ export interface CreateBucketCommandOutput extends CreateBucketOutput, __Metadat
 * bucket in a Region other than US East (N. Virginia), your application must be able to
 * handle 307 redirect. For more information, see Virtual hosting of buckets.
 *
- * When creating a bucket using this operation, you can optionally specify the accounts or
- * groups that should be granted specific permissions on the bucket. There are two ways to
- * grant the appropriate permissions using the request headers.
+ * Access control lists (ACLs)
+ *
+ * When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or
+ * groups that should be granted specific permissions on the bucket.
+ *
+ * If your CreateBucket request includes the BucketOwnerEnforced value for
+ * the x-amz-object-ownership header, your request can either not specify
+ * an ACL or specify bucket owner full control ACLs, such as the bucket-owner-full-control
+ * canned ACL or an equivalent ACL expressed in the XML format. For
+ * more information, see Controlling object
+ * ownership in the Amazon S3 User Guide.
+ *
+ * There are two ways to grant the appropriate permissions using the request headers.
 *

 * Specify a canned ACL using the x-amz-acl request header. Amazon S3
@@ -58,7 +69,7 @@ export interface CreateBucketCommandOutput extends CreateBucketOutput, __Metadat
 * x-amz-grant-write, x-amz-grant-read-acp,
 * x-amz-grant-write-acp, and x-amz-grant-full-control
 * headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For
- * more information, see Access control list
+ * more information, see Access control list
 * (ACL) overview.
 *
 * You specify each grantee as a type=value pair, where the type is one of the
 * following:
@@ -122,13 +133,30 @@ export interface CreateBucketCommandOutput extends CreateBucketOutput, __Metadat
 *
 * Permissions
 *
- * If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write,
- * authenticated-read, or if you specify access permissions explicitly through any other ACL, both
- * s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL the
- * CreateBucket request is private, only s3:CreateBucket permission is needed.
- *
- * If ObjectLockEnabledForBucket is set to true in your CreateBucket request,
- * s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.
- *
+ * In addition to s3:CreateBucket, the following permissions are required when your CreateBucket includes specific headers:
+ *
+ * ACLs - If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write,
+ * authenticated-read, or if you specify access permissions explicitly through any other ACL, both
+ * s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL the
+ * CreateBucket request is private or doesn't specify any ACLs, only s3:CreateBucket permission is needed.
+ *
+ * Object Lock - If
+ * ObjectLockEnabledForBucket is set to true in your
+ * CreateBucket request,
+ * s3:PutBucketObjectLockConfiguration and
+ * s3:PutBucketVersioning permissions are required.
+ *
+ * S3 Object Ownership - If your CreateBucket
+ * request includes the the x-amz-object-ownership header,
+ * s3:PutBucketOwnershipControls permission is required.
+ *
 * The following operations are related to CreateBucket:

                                            *
                                              *
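A minimal sketch of creating a bucket with the ObjectOwnership parameter added in this change, which the client serializes as the x-amz-object-ownership header (illustrative; bucket name and region are placeholders):

```ts
import { S3Client, CreateBucketCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" }); // placeholder region

async function createAclLessBucket(): Promise<void> {
  await s3.send(
    new CreateBucketCommand({
      Bucket: "example-bucket",
      // Sent as x-amz-object-ownership; the caller also needs the
      // s3:PutBucketOwnershipControls permission for this header.
      ObjectOwnership: "BucketOwnerEnforced",
    })
  );
}

createAclLessBucket().catch(console.error);
```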
diff --git a/clients/client-s3/src/commands/DeleteBucketIntelligentTieringConfigurationCommand.ts b/clients/client-s3/src/commands/DeleteBucketIntelligentTieringConfigurationCommand.ts
index 711c55f78b64..d40251aaedfa 100644
--- a/clients/client-s3/src/commands/DeleteBucketIntelligentTieringConfigurationCommand.ts
+++ b/clients/client-s3/src/commands/DeleteBucketIntelligentTieringConfigurationCommand.ts
@@ -25,8 +25,8 @@ export interface DeleteBucketIntelligentTieringConfigurationCommandOutput extend
 /**
  * Deletes the S3 Intelligent-Tiering configuration from the specified bucket.
- * The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.
- * The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
+ * The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.
+ * The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.
 *
 * For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.
 *
 * Operations related to DeleteBucketIntelligentTieringConfiguration include:

                                              diff --git a/clients/client-s3/src/commands/GetBucketAclCommand.ts b/clients/client-s3/src/commands/GetBucketAclCommand.ts index 21bfe74991ee..aa009ca3029b 100644 --- a/clients/client-s3/src/commands/GetBucketAclCommand.ts +++ b/clients/client-s3/src/commands/GetBucketAclCommand.ts @@ -28,6 +28,13 @@ export interface GetBucketAclCommandOutput extends GetBucketAclOutput, __Metadat * return the ACL of the bucket, you must have READ_ACP access to the bucket. If * READ_ACP permission is granted to the anonymous user, you can return the * ACL of the bucket without using an authorization header.

                                              + * + *

                                              If your bucket uses the bucket owner enforced setting for S3 Object Ownership, + * requests to read ACLs are still supported and return the bucket-owner-full-control + * ACL with the owner being the account that created the bucket. For more information, see + * + * Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.

                                              + *
                                              * *

                                              * Related Resources diff --git a/clients/client-s3/src/commands/GetBucketIntelligentTieringConfigurationCommand.ts b/clients/client-s3/src/commands/GetBucketIntelligentTieringConfigurationCommand.ts index c8e2bfcafb13..d7c4074c9165 100644 --- a/clients/client-s3/src/commands/GetBucketIntelligentTieringConfigurationCommand.ts +++ b/clients/client-s3/src/commands/GetBucketIntelligentTieringConfigurationCommand.ts @@ -30,8 +30,8 @@ export interface GetBucketIntelligentTieringConfigurationCommandOutput /** *

                                              Gets the S3 Intelligent-Tiering configuration from the specified bucket.

                                              - *

                                              The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

                                              - *

                                              The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                              + *

                                              The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

                                              + *

                                              The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                              *

                                              For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

                                              *

                                              Operations related to * GetBucketIntelligentTieringConfiguration include:

diff --git a/clients/client-s3/src/commands/GetBucketOwnershipControlsCommand.ts b/clients/client-s3/src/commands/GetBucketOwnershipControlsCommand.ts
index e2d521c1d40b..de9b83909116 100644
--- a/clients/client-s3/src/commands/GetBucketOwnershipControlsCommand.ts
+++ b/clients/client-s3/src/commands/GetBucketOwnershipControlsCommand.ts
@@ -25,9 +25,9 @@ export interface GetBucketOwnershipControlsCommandOutput extends GetBucketOwners
 /**
  * Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you
  * must have the s3:GetBucketOwnershipControls permission. For more information
- * about Amazon S3 permissions, see Specifying
- * Permissions in a Policy.
- * For information about Amazon S3 Object Ownership, see Using Object Ownership.
+ * about Amazon S3 permissions, see Specifying
+ * permissions in a policy.
+ * For information about Amazon S3 Object Ownership, see Using Object Ownership.
 *
 * The following operations are related to GetBucketOwnershipControls:

                                              *
                                                *
diff --git a/clients/client-s3/src/commands/GetObjectAclCommand.ts b/clients/client-s3/src/commands/GetObjectAclCommand.ts
index 3444cb3ce8bb..de79f8526a13 100644
--- a/clients/client-s3/src/commands/GetObjectAclCommand.ts
+++ b/clients/client-s3/src/commands/GetObjectAclCommand.ts
@@ -31,7 +31,13 @@ export interface GetObjectAclCommandOutput extends GetObjectAclOutput, __Metadat
 *
 * By default, GET returns ACL information about the current version of an object. To
 * return ACL information about a different version, use the versionId subresource.
- *
+ *
+ * If your bucket uses the bucket owner enforced setting for S3 Object Ownership,
+ * requests to read ACLs are still supported and return the bucket-owner-full-control
+ * ACL with the owner being the account that created the bucket. For more information, see
+ * Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.
+ *
 *
 * The following operations are related to GetObjectAcl:

                                                *
                                                  *
                                                • diff --git a/clients/client-s3/src/commands/ListBucketIntelligentTieringConfigurationsCommand.ts b/clients/client-s3/src/commands/ListBucketIntelligentTieringConfigurationsCommand.ts index 654ade7cc64e..e835a9f33f5a 100644 --- a/clients/client-s3/src/commands/ListBucketIntelligentTieringConfigurationsCommand.ts +++ b/clients/client-s3/src/commands/ListBucketIntelligentTieringConfigurationsCommand.ts @@ -30,8 +30,8 @@ export interface ListBucketIntelligentTieringConfigurationsCommandOutput /** *

                                                  Lists the S3 Intelligent-Tiering configuration from the specified bucket.

                                                  - *

                                                  The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

                                                  - *

                                                  The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                                  + *

                                                  The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

                                                  + *

                                                  The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                                  *

                                                  For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

                                                  *

                                                  Operations related to * ListBucketIntelligentTieringConfigurations include:

                                                  diff --git a/clients/client-s3/src/commands/PutBucketAclCommand.ts b/clients/client-s3/src/commands/PutBucketAclCommand.ts index 268dacb72d23..63a75152e116 100644 --- a/clients/client-s3/src/commands/PutBucketAclCommand.ts +++ b/clients/client-s3/src/commands/PutBucketAclCommand.ts @@ -48,7 +48,13 @@ export interface PutBucketAclCommandOutput extends __MetadataBearer {} * that updates a bucket ACL using the request body, then you can continue to use that * approach.

                                                  * - * + * + *

                                                  If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + * You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and + * return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. + * For more information, see Controlling object ownership + * in the Amazon S3 User Guide.

                                                  + *
                                                  *

                                                  * Access Permissions *

                                                  diff --git a/clients/client-s3/src/commands/PutBucketIntelligentTieringConfigurationCommand.ts b/clients/client-s3/src/commands/PutBucketIntelligentTieringConfigurationCommand.ts index 93909beb9473..9d820af74384 100644 --- a/clients/client-s3/src/commands/PutBucketIntelligentTieringConfigurationCommand.ts +++ b/clients/client-s3/src/commands/PutBucketIntelligentTieringConfigurationCommand.ts @@ -26,8 +26,8 @@ export interface PutBucketIntelligentTieringConfigurationCommandOutput extends _ /** *

                                                  Puts a S3 Intelligent-Tiering configuration to the specified bucket. * You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket.

                                                  - *

                                                  The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.

                                                  - *

                                                  The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                                  + *

                                                  The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

                                                  + *

                                                  The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                                  *

                                                  For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

                                                  *

                                                  Operations related to * PutBucketIntelligentTieringConfiguration include:

diff --git a/clients/client-s3/src/commands/PutBucketLoggingCommand.ts b/clients/client-s3/src/commands/PutBucketLoggingCommand.ts
index 5fd69859a371..7d05733c9b25 100644
--- a/clients/client-s3/src/commands/PutBucketLoggingCommand.ts
+++ b/clients/client-s3/src/commands/PutBucketLoggingCommand.ts
@@ -28,10 +28,15 @@ export interface PutBucketLoggingCommandOutput extends __MetadataBearer {}
 * modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the
 * source bucket. To set the logging status of a bucket, you must be the bucket owner.
 *
- * The bucket owner is automatically granted FULL_CONTROL to all logs. You use the
- * Grantee request element to grant access to other people. The
+ * The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The
 * Permissions request element specifies the kind of access the grantee has to
 * the logs.
+ *
+ * If the target bucket for log delivery uses the bucket owner enforced
+ * setting for S3 Object Ownership, you can't use the Grantee request element
+ * to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the
+ * Amazon S3 User Guide.
+ *
 *
 * Grantee Values
@@ -76,7 +81,7 @@ export interface PutBucketLoggingCommandOutput extends __MetadataBearer {}
 *
- * For more information about server access logging, see Server Access Logging.
+ * For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide.
 *
 * For more information about creating a bucket, see CreateBucket. For more
 * information about returning the logging status of a bucket, see GetBucketLogging.

diff --git a/clients/client-s3/src/commands/PutBucketOwnershipControlsCommand.ts b/clients/client-s3/src/commands/PutBucketOwnershipControlsCommand.ts
index 63c17ca94bce..039c7d841661 100644
--- a/clients/client-s3/src/commands/PutBucketOwnershipControlsCommand.ts
+++ b/clients/client-s3/src/commands/PutBucketOwnershipControlsCommand.ts
@@ -26,8 +26,8 @@ export interface PutBucketOwnershipControlsCommandOutput extends __MetadataBeare
 /**
  * Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this
  * operation, you must have the s3:PutBucketOwnershipControls permission. For
- * more information about Amazon S3 permissions, see Specifying Permissions in a Policy.
- * For information about Amazon S3 Object Ownership, see Using Object Ownership.
+ * more information about Amazon S3 permissions, see Specifying permissions in a policy.
+ * For information about Amazon S3 Object Ownership, see Using object ownership.
 *
 * The following operations are related to PutBucketOwnershipControls:

                                                  *
                                                    *
                                                  • diff --git a/clients/client-s3/src/commands/PutObjectAclCommand.ts b/clients/client-s3/src/commands/PutObjectAclCommand.ts index 9ae9d4048d25..b0be6ff3ed4f 100644 --- a/clients/client-s3/src/commands/PutObjectAclCommand.ts +++ b/clients/client-s3/src/commands/PutObjectAclCommand.ts @@ -33,8 +33,13 @@ export interface PutObjectAclCommandOutput extends PutObjectAclOutput, __Metadat * the ACL on an object using either the request body or the headers. For example, if you have * an existing application that updates a bucket ACL using the request body, you can continue * to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide.

                                                    - * - * + * + *

                                                    If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. + * You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and + * return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. + * For more information, see Controlling object ownership + * in the Amazon S3 User Guide.

                                                    + *
                                                    * *

                                                    * Access Permissions diff --git a/clients/client-s3/src/commands/PutObjectCommand.ts b/clients/client-s3/src/commands/PutObjectCommand.ts index 8f670111eb9d..1545e63eb794 100644 --- a/clients/client-s3/src/commands/PutObjectCommand.ts +++ b/clients/client-s3/src/commands/PutObjectCommand.ts @@ -65,7 +65,6 @@ export interface PutObjectCommandOutput extends PutObjectOutput, __MetadataBeare *

                                                  • *
                                                  * - * *

                                                  * Server-side Encryption *

                                                  @@ -87,7 +86,20 @@ export interface PutObjectCommandOutput extends PutObjectOutput, __MetadataBeare * permissions are then added to the ACL on the object. For more information, see Access Control List * (ACL) Overview and Managing ACLs Using the REST * API.

                                                  - * + *

                                                  If the bucket that you're uploading objects to uses the bucket owner enforced setting + * for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that + * use this setting only accept PUT requests that don't specify an ACL or PUT requests that + * specify bucket owner full control ACLs, such as the bucket-owner-full-control canned + * ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other + * ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a + * 400 error with the error code + * AccessControlListNotSupported.

                                                  + *

                                                  For more information, see Controlling ownership of + * objects and disabling ACLs in the Amazon S3 User Guide.

                                                  + * + *

                                                  If your bucket uses the bucket owner enforced setting for Object Ownership, + * all objects written to the bucket by any account will be owned by the bucket owner.

                                                  + *
                                                  *

                                                  * Storage Class Options *

                                                  diff --git a/clients/client-s3/src/models/models_0.ts b/clients/client-s3/src/models/models_0.ts index 2cee053fde37..98ccb097f310 100644 --- a/clients/client-s3/src/models/models_0.ts +++ b/clients/client-s3/src/models/models_0.ts @@ -585,6 +585,7 @@ export type ObjectLockMode = "COMPLIANCE" | "GOVERNANCE"; export type StorageClass = | "DEEP_ARCHIVE" | "GLACIER" + | "GLACIER_IR" | "INTELLIGENT_TIERING" | "ONEZONE_IA" | "OUTPOSTS" @@ -1001,6 +1002,8 @@ export namespace CreateBucketConfiguration { }); } +export type ObjectOwnership = "BucketOwnerEnforced" | "BucketOwnerPreferred" | "ObjectWriter"; + export interface CreateBucketRequest { /** *

                                                  The canned ACL to apply to the bucket.

                                                  @@ -1048,6 +1051,21 @@ export interface CreateBucketRequest { *

                                                  Specifies whether you want S3 Object Lock to be enabled for the new bucket.

                                                  */ ObjectLockEnabledForBucket?: boolean; + + /** + *

                                                  The container element for object ownership for a bucket's ownership controls.

                                                  + *

                                                  BucketOwnerPreferred - Objects uploaded to the bucket change ownership to the bucket + * owner if the objects are uploaded with the bucket-owner-full-control canned + * ACL.

                                                  + *

                                                  ObjectWriter - The uploading account will own the object if the object is uploaded with + * the bucket-owner-full-control canned ACL.

                                                  + *

                                                  BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer affect permissions. + * The bucket owner automatically owns and has full control over every object in the bucket. The bucket only + * accepts PUT requests that don't specify an ACL or bucket owner full control + * ACLs, such as the bucket-owner-full-control canned + * ACL or an equivalent form of this ACL expressed in the XML format.

                                                  + */ + ObjectOwnership?: ObjectOwnership | string; } export namespace CreateBucketRequest { @@ -5229,15 +5247,21 @@ export namespace NoncurrentVersionExpiration { }); } -export type TransitionStorageClass = "DEEP_ARCHIVE" | "GLACIER" | "INTELLIGENT_TIERING" | "ONEZONE_IA" | "STANDARD_IA"; +export type TransitionStorageClass = + | "DEEP_ARCHIVE" + | "GLACIER" + | "GLACIER_IR" + | "INTELLIGENT_TIERING" + | "ONEZONE_IA" + | "STANDARD_IA"; /** *

                                                  Container for the transition rule that describes when noncurrent objects transition to * the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING, - * GLACIER, or DEEP_ARCHIVE storage class. If your bucket is + * GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class. If your bucket is * versioning-enabled (or versioning is suspended), you can set this action to request that * Amazon S3 transition noncurrent object versions to the STANDARD_IA, - * ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, or + * ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or * DEEP_ARCHIVE storage class at a specific period in the object's * lifetime.

                                                  */ @@ -5477,6 +5501,9 @@ export type BucketLogsPermission = "FULL_CONTROL" | "READ" | "WRITE"; /** *
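Since these hunks add GLACIER_IR to the storage class unions, here is a minimal sketch of a lifecycle rule that transitions noncurrent versions to it (illustrative; the bucket name, rule ID, prefix, and day count are placeholders):

```ts
import { S3Client, PutBucketLifecycleConfigurationCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function addGlacierIrTransition(): Promise<void> {
  await s3.send(
    new PutBucketLifecycleConfigurationCommand({
      Bucket: "example-bucket",
      LifecycleConfiguration: {
        Rules: [
          {
            ID: "noncurrent-to-glacier-ir",
            Status: "Enabled",
            Filter: { Prefix: "logs/" },
            // GLACIER_IR is one of the TransitionStorageClass values added in this change.
            NoncurrentVersionTransitions: [{ NoncurrentDays: 30, StorageClass: "GLACIER_IR" }],
          },
        ],
      },
    })
  );
}

addGlacierIrTransition().catch(console.error);
```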

                                                  Container for granting information.

                                                  + *

                                                  Buckets that use the bucket owner enforced setting for Object + * Ownership don't support target grants. For more information, see Permissions server access log delivery in the + * Amazon S3 User Guide.

                                                  */ export interface TargetGrant { /** @@ -5516,6 +5543,9 @@ export interface LoggingEnabled { /** *

                                                  Container for granting information.

                                                  + *

                                                  Buckets that use the bucket owner enforced setting for Object + * Ownership don't support target grants. For more information, see Permissions for server access log delivery in the + * Amazon S3 User Guide.

                                                  */ TargetGrants?: TargetGrant[]; @@ -5804,7 +5834,27 @@ export namespace GetBucketNotificationConfigurationRequest { }); } +/** + *

                                                  A container for specifying the configuration for Amazon EventBridge.

                                                  + */ +export interface EventBridgeConfiguration {} + +export namespace EventBridgeConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: EventBridgeConfiguration): any => ({ + ...obj, + }); +} + export type Event = + | "s3:IntelligentTiering" + | "s3:LifecycleExpiration:*" + | "s3:LifecycleExpiration:Delete" + | "s3:LifecycleExpiration:DeleteMarkerCreated" + | "s3:LifecycleTransition" + | "s3:ObjectAcl:Put" | "s3:ObjectCreated:*" | "s3:ObjectCreated:CompleteMultipartUpload" | "s3:ObjectCreated:Copy" @@ -5815,7 +5865,11 @@ export type Event = | "s3:ObjectRemoved:DeleteMarkerCreated" | "s3:ObjectRestore:*" | "s3:ObjectRestore:Completed" + | "s3:ObjectRestore:Delete" | "s3:ObjectRestore:Post" + | "s3:ObjectTagging:*" + | "s3:ObjectTagging:Delete" + | "s3:ObjectTagging:Put" | "s3:ReducedRedundancyLostObject" | "s3:Replication:*" | "s3:Replication:OperationFailedReplication" @@ -6036,6 +6090,11 @@ export interface NotificationConfiguration { * them.

                                                  */ LambdaFunctionConfigurations?: LambdaFunctionConfiguration[]; + + /** + *

                                                  Enables delivery of events to Amazon EventBridge.

                                                  + */ + EventBridgeConfiguration?: EventBridgeConfiguration; } export namespace NotificationConfiguration { @@ -6047,8 +6106,6 @@ export namespace NotificationConfiguration { }); } -export type ObjectOwnership = "BucketOwnerPreferred" | "ObjectWriter"; - /** *

                                                  The container element for an ownership control rule.

                                                  */ @@ -6060,6 +6117,11 @@ export interface OwnershipControlsRule { * ACL.

                                                  *

                                                  ObjectWriter - The uploading account will own the object if the object is uploaded with * the bucket-owner-full-control canned ACL.

                                                  + *

                                                  BucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer affect permissions. + * The bucket owner automatically owns and has full control over every object in the bucket. The bucket only + * accepts PUT requests that don't specify an ACL or bucket owner full control + * ACLs, such as the bucket-owner-full-control canned + * ACL or an equivalent form of this ACL expressed in the XML format.

                                                  */ ObjectOwnership: ObjectOwnership | string | undefined; } @@ -6094,7 +6156,7 @@ export namespace OwnershipControls { export interface GetBucketOwnershipControlsOutput { /** - *

                                                  The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in + *

                                                  The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) currently in * effect for this Amazon S3 bucket.

                                                  */ OwnershipControls?: OwnershipControls; @@ -9184,6 +9246,7 @@ export namespace ListMultipartUploadsRequest { export type ObjectStorageClass = | "DEEP_ARCHIVE" | "GLACIER" + | "GLACIER_IR" | "INTELLIGENT_TIERING" | "ONEZONE_IA" | "OUTPOSTS" @@ -10521,6 +10584,11 @@ export interface PutBucketNotificationConfigurationRequest { *

                                                  The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.

                                                  */ ExpectedBucketOwner?: string; + + /** + *

                                                  Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or false value.

                                                  + */ + SkipDestinationValidation?: boolean; } export namespace PutBucketNotificationConfigurationRequest { @@ -10552,7 +10620,7 @@ export interface PutBucketOwnershipControlsRequest { ExpectedBucketOwner?: string; /** - *
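A minimal sketch of the two additions in this hunk, enabling EventBridge delivery and skipping destination validation (illustrative; the bucket name is a placeholder):

```ts
import { S3Client, PutBucketNotificationConfigurationCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({});

async function enableEventBridgeNotifications(): Promise<void> {
  await s3.send(
    new PutBucketNotificationConfigurationCommand({
      Bucket: "example-bucket",
      // An empty EventBridgeConfiguration element turns on delivery of bucket events to EventBridge.
      NotificationConfiguration: { EventBridgeConfiguration: {} },
      // Serialized as the x-amz-skip-destination-validation header.
      SkipDestinationValidation: true,
    })
  );
}

enableEventBridgeNotifications().catch(console.error);
```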

                                                  The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want + *

                                                  The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) that you want * to apply to this Amazon S3 bucket.

                                                  */ OwnershipControls: OwnershipControls | undefined; @@ -11704,22 +11772,3 @@ export namespace RestoreObjectOutput { } export type Tier = "Bulk" | "Expedited" | "Standard"; - -/** - *

                                                  Container for S3 Glacier job parameters.

                                                  - */ -export interface GlacierJobParameters { - /** - *

                                                  Retrieval tier at which the restore will be processed.

                                                  - */ - Tier: Tier | string | undefined; -} - -export namespace GlacierJobParameters { - /** - * @internal - */ - export const filterSensitiveLog = (obj: GlacierJobParameters): any => ({ - ...obj, - }); -} diff --git a/clients/client-s3/src/models/models_1.ts b/clients/client-s3/src/models/models_1.ts index aaaa8d84357b..2f4c5da55386 100644 --- a/clients/client-s3/src/models/models_1.ts +++ b/clients/client-s3/src/models/models_1.ts @@ -2,7 +2,6 @@ import { SENSITIVE_STRING } from "@aws-sdk/smithy-client"; import { Readable } from "stream"; import { - GlacierJobParameters, Grant, ObjectCannedACL, ObjectLockLegalHoldStatus, @@ -16,6 +15,25 @@ import { Tier, } from "./models_0"; +/** + *

                                                  Container for S3 Glacier job parameters.

                                                  + */ +export interface GlacierJobParameters { + /** + *

                                                  Retrieval tier at which the restore will be processed.

                                                  + */ + Tier: Tier | string | undefined; +} + +export namespace GlacierJobParameters { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GlacierJobParameters): any => ({ + ...obj, + }); +} + /** *

                                                  Contains the type of server-side encryption used.

                                                  */ diff --git a/clients/client-s3/src/protocols/Aws_restXml.ts b/clients/client-s3/src/protocols/Aws_restXml.ts index d4ad9492e17b..51f5e336e054 100644 --- a/clients/client-s3/src/protocols/Aws_restXml.ts +++ b/clients/client-s3/src/protocols/Aws_restXml.ts @@ -309,9 +309,9 @@ import { EncryptionConfiguration, ErrorDocument, Event, + EventBridgeConfiguration, ExistingObjectReplication, FilterRule, - GlacierJobParameters, Grant, Grantee, IndexDocument, @@ -398,6 +398,7 @@ import { CSVOutput, Encryption, EndEvent, + GlacierJobParameters, InputSerialization, JSONInput, JSONOutput, @@ -685,6 +686,7 @@ export const serializeAws_restXmlCreateBucketCommand = async ( ...(isSerializableHeaderValue(input.ObjectLockEnabledForBucket) && { "x-amz-bucket-object-lock-enabled": input.ObjectLockEnabledForBucket!.toString(), }), + ...(isSerializableHeaderValue(input.ObjectOwnership) && { "x-amz-object-ownership": input.ObjectOwnership! }), }; let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/{Bucket}"; if (input.Bucket !== undefined) { @@ -3618,6 +3620,9 @@ export const serializeAws_restXmlPutBucketNotificationConfigurationCommand = asy ...(isSerializableHeaderValue(input.ExpectedBucketOwner) && { "x-amz-expected-bucket-owner": input.ExpectedBucketOwner!, }), + ...(isSerializableHeaderValue(input.SkipDestinationValidation) && { + "x-amz-skip-destination-validation": input.SkipDestinationValidation!.toString(), + }), }; let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/{Bucket}"; if (input.Bucket !== undefined) { @@ -6614,11 +6619,18 @@ export const deserializeAws_restXmlGetBucketNotificationConfigurationCommand = a } const contents: GetBucketNotificationConfigurationCommandOutput = { $metadata: deserializeMetadata(output), + EventBridgeConfiguration: undefined, LambdaFunctionConfigurations: undefined, QueueConfigurations: undefined, TopicConfigurations: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data["EventBridgeConfiguration"] !== undefined) { + contents.EventBridgeConfiguration = deserializeAws_restXmlEventBridgeConfiguration( + data["EventBridgeConfiguration"], + context + ); + } if (data.CloudFunctionConfiguration === "") { contents.LambdaFunctionConfigurations = []; } @@ -10863,6 +10875,14 @@ const serializeAws_restXmlErrorDocument = (input: ErrorDocument, context: __Serd return bodyNode; }; +const serializeAws_restXmlEventBridgeConfiguration = ( + input: EventBridgeConfiguration, + context: __SerdeContext +): any => { + const bodyNode = new __XmlNode("EventBridgeConfiguration"); + return bodyNode; +}; + const serializeAws_restXmlEventList = (input: (Event | string)[], context: __SerdeContext): any => { return input .filter((e: any) => e != null) @@ -11622,6 +11642,12 @@ const serializeAws_restXmlNotificationConfiguration = ( bodyNode.addChildNode(node); }); } + if (input.EventBridgeConfiguration !== undefined && input.EventBridgeConfiguration !== null) { + const node = serializeAws_restXmlEventBridgeConfiguration(input.EventBridgeConfiguration, context).withName( + "EventBridgeConfiguration" + ); + bodyNode.addChildNode(node); + } return bodyNode; }; @@ -13118,6 +13144,14 @@ const deserializeAws_restXmlErrors = (output: any, context: __SerdeContext): _Er }); }; +const deserializeAws_restXmlEventBridgeConfiguration = ( + output: any, + context: __SerdeContext +): 
EventBridgeConfiguration => { + const contents: any = {}; + return contents; +}; + const deserializeAws_restXmlEventList = (output: any, context: __SerdeContext): (Event | string)[] => { return (output || []) .filter((e: any) => e != null) diff --git a/clients/client-snowball/README.md b/clients/client-snowball/README.md index 3f8403d5a3d7..dd26bd12a698 100644 --- a/clients/client-snowball/README.md +++ b/clients/client-snowball/README.md @@ -7,13 +7,12 @@ AWS SDK for JavaScript Snowball Client for Node.js, Browser and React Native. -
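For orientation, the Aws_restXml changes above expose three caller-visible S3 options: an ObjectOwnership header on CreateBucket, a SkipDestinationValidation header on PutBucketNotificationConfiguration, and EventBridgeConfiguration serde for bucket notifications. A minimal TypeScript sketch of how a caller might exercise them; the bucket name and region are placeholders, not part of this patch:

```ts
import {
  S3Client,
  CreateBucketCommand,
  PutBucketNotificationConfigurationCommand,
  GetBucketNotificationConfigurationCommand,
} from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" });

async function configureBucketNotifications() {
  // "example-bucket" is a placeholder; ObjectOwnership is serialized as the new
  // x-amz-object-ownership header on CreateBucket.
  await s3.send(
    new CreateBucketCommand({ Bucket: "example-bucket", ObjectOwnership: "BucketOwnerEnforced" })
  );

  // EventBridgeConfiguration is an empty marker object; setting it turns on EventBridge
  // delivery for the bucket. SkipDestinationValidation maps to the new
  // x-amz-skip-destination-validation header added above.
  await s3.send(
    new PutBucketNotificationConfigurationCommand({
      Bucket: "example-bucket",
      SkipDestinationValidation: true,
      NotificationConfiguration: { EventBridgeConfiguration: {} },
    })
  );

  // The GET deserializer now surfaces EventBridgeConfiguration on the response.
  const { EventBridgeConfiguration } = await s3.send(
    new GetBucketNotificationConfigurationCommand({ Bucket: "example-bucket" })
  );
  console.log(EventBridgeConfiguration !== undefined ? "EventBridge enabled" : "not enabled");
}

configureBucketNotifications().catch(console.error);
```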

                                                  AWS Snow Family is a petabyte-scale data transport solution that uses secure devices to -transfer large amounts of data between your on-premises data centers and Amazon Simple Storage -Service (Amazon S3). The Snow commands described here provide access to the same -functionality that is available in the AWS Snow Family Management Console, which enables you to -create and manage jobs for a Snow device. To transfer data locally with a Snow device, you'll -need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or AWS OpsHub for Snow Family. For more -information, see the User Guide.

                                                  +

                                                  The Amazon Web Services Snow Family provides a petabyte-scale data transport solution that uses +secure devices to transfer large amounts of data between your on-premises data centers and +Amazon Simple Storage Service (Amazon S3). The Snow Family commands described here provide access to the same +functionality that is available in the Amazon Web Services Snow Family Management Console, which enables you to create +and manage jobs for a Snow Family device. To transfer data locally with a Snow Family device, +you'll need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or OpsHub for Snow Family. For more information, see the User Guide.

                                                  ## Installing diff --git a/clients/client-snowball/src/Snowball.ts b/clients/client-snowball/src/Snowball.ts index ccf3e9a61643..e2a1c7d32639 100644 --- a/clients/client-snowball/src/Snowball.ts +++ b/clients/client-snowball/src/Snowball.ts @@ -108,13 +108,12 @@ import { import { SnowballClient } from "./SnowballClient"; /** - *

                                                  AWS Snow Family is a petabyte-scale data transport solution that uses secure devices to - * transfer large amounts of data between your on-premises data centers and Amazon Simple Storage - * Service (Amazon S3). The Snow commands described here provide access to the same - * functionality that is available in the AWS Snow Family Management Console, which enables you to - * create and manage jobs for a Snow device. To transfer data locally with a Snow device, you'll - * need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or AWS OpsHub for Snow Family. For more - * information, see the User Guide.

                                                  + *

                                                  The Amazon Web Services Snow Family provides a petabyte-scale data transport solution that uses + * secure devices to transfer large amounts of data between your on-premises data centers and + * Amazon Simple Storage Service (Amazon S3). The Snow Family commands described here provide access to the same + * functionality that is available in the Amazon Web Services Snow Family Management Console, which enables you to create + * and manage jobs for a Snow Family device. To transfer data locally with a Snow Family device, + * you'll need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or OpsHub for Snow Family. For more information, see the User Guide.

                                                  */ export class Snowball extends SnowballClient { /** @@ -250,21 +249,21 @@ export class Snowball extends SnowballClient { /** *

                                                  Creates a job to import or export data between Amazon S3 and your on-premises data - * center. Your AWS account must have the right trust policies and permissions in place to create - * a job for a Snow device. If you're creating a job for a node in a cluster, you only need to provide - * the clusterId value; the other job attributes are inherited from the cluster. - *

                                                  + * center. Your Amazon Web Services account must have the right trust policies and permissions in + * place to create a job for a Snow device. If you're creating a job for a node in a cluster, you + * only need to provide the clusterId value; the other job attributes are inherited + * from the cluster.

                                                  * *

                                                  Only the Snowball; Edge device type is supported when ordering clustered jobs.

                                                  *

                                                  The device capacity is optional.

                                                  - *

                                                  Availability of device types differ by AWS Region. For more information about Region - * availability, see AWS Regional Services.

                                                  + *

Availability of device types differs by Amazon Web Services Region. For more information + * about Region availability, see Amazon Web Services Regional Services.

                                                  *
                                                  * *

                                                  * *

                                                  - * AWS Snow Family device types and their capacities. + * Snow Family Devices and their capacities. *

                                                  *
                                                    *
                                                  • @@ -358,7 +357,8 @@ export class Snowball extends SnowballClient { *
                                                  • *

                                                    Description: Original Snowball device

                                                    * - *

                                                    This device is only available in the Ningxia, Beijing, and Singapore AWS Regions.

                                                    + *

This device is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Regions. + *

                                                    *
                                                    *
                                                  • *
                                                  @@ -374,7 +374,7 @@ export class Snowball extends SnowballClient { *
                                                • *

                                                  Description: Original Snowball device

                                                  * - *

                                                  This device is only available in the Ningxia, Beijing, and Singapore AWS Regions.

                                                  + *

This device is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Regions.

                                                  *
                                                  *
                                                • *
                                                @@ -407,8 +407,7 @@ export class Snowball extends SnowballClient { /** *

                                                Creates a job with the long-term usage option for a device. The long-term usage is a - * 1-year or 3-year long-term pricing type for the device. You are billed upfront, and AWS - * provides discounts for long-term pricing. + * 1-year or 3-year long-term pricing type for the device. You are billed upfront, and Amazon Web Services provides discounts for long-term pricing. *

                                                */ public createLongTermPricing( @@ -441,7 +440,7 @@ export class Snowball extends SnowballClient { } /** - *

                                                Creates a shipping label that will be used to return the Snow device to AWS.

                                                + *

                                                Creates a shipping label that will be used to return the Snow device to Amazon Web Services.

                                                */ public createReturnShippingLabel( args: CreateReturnShippingLabelCommandInput, @@ -600,7 +599,7 @@ export class Snowball extends SnowballClient { } /** - *
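The two return-shipping-label operations touched in this file pair naturally. A minimal sketch, assuming a placeholder JobId; the ShippingOption string is one of the enum's documented values:

```ts
import {
  SnowballClient,
  CreateReturnShippingLabelCommand,
  DescribeReturnShippingLabelCommand,
} from "@aws-sdk/client-snowball";

const snowball = new SnowballClient({ region: "us-west-2" });
const jobId = "JID123e4567-e89b-12d3-a456-426655440000"; // placeholder job ID

async function returnLabel() {
  // Request a return shipping label for the device, then check its status.
  const created = await snowball.send(
    new CreateReturnShippingLabelCommand({ JobId: jobId, ShippingOption: "SECOND_DAY" })
  );
  console.log("label status after create:", created.Status);

  const described = await snowball.send(new DescribeReturnShippingLabelCommand({ JobId: jobId }));
  console.log("label status:", described.Status);
}

returnLabel().catch(console.error);
```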

                                                Information on the shipping label of a Snow device that is being returned to AWS.

                                                + *

                                                Information on the shipping label of a Snow device that is being returned to Amazon Web Services.

                                                */ public describeReturnShippingLabel( args: DescribeReturnShippingLabelCommandInput, @@ -639,7 +638,7 @@ export class Snowball extends SnowballClient { * *

                                                The manifest is an encrypted file that you can download after your job enters the * WithCustomer status. The manifest is decrypted by using the - * UnlockCode code value, when you pass both values to the Snow device through the + * UnlockCode code value, when you pass both values to the Snow device through the * Snowball client when the client is started for the first time.

                                                * * @@ -688,13 +687,13 @@ export class Snowball extends SnowballClient { * *

                                                The UnlockCode value is a 29-character code with 25 alphanumeric * characters and 4 hyphens. This code is used to decrypt the manifest file when it is passed - * along with the manifest to the Snow device through the Snowball client when the client is started - * for the first time.

                                                + * along with the manifest to the Snow device through the Snowball client when the client is + * started for the first time.

                                                * *

                                                As a best practice, we recommend that you don't save a copy of the * UnlockCode in the same location as the manifest file for that job. Saving these - * separately helps prevent unauthorized parties from gaining access to the Snow device associated - * with that job.

                                                + * separately helps prevent unauthorized parties from gaining access to the Snow device + * associated with that job.

                                                */ public getJobUnlockCode( args: GetJobUnlockCodeCommandInput, @@ -729,8 +728,8 @@ export class Snowball extends SnowballClient { *
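The manifest and unlock-code guidance above amounts to two separate calls whose results should not be stored together. A minimal sketch with a placeholder JobId:

```ts
import {
  SnowballClient,
  GetJobManifestCommand,
  GetJobUnlockCodeCommand,
} from "@aws-sdk/client-snowball";

const snowball = new SnowballClient({ region: "us-west-2" });
const jobId = "JID123e4567-e89b-12d3-a456-426655440000"; // placeholder job ID

async function fetchJobCredentials() {
  // Presigned URL for the encrypted manifest file.
  const { ManifestURI } = await snowball.send(new GetJobManifestCommand({ JobId: jobId }));

  // 29-character unlock code; keep it separate from the manifest, per the guidance above.
  const { UnlockCode } = await snowball.send(new GetJobUnlockCodeCommand({ JobId: jobId }));

  return { ManifestURI, UnlockCode };
}

fetchJobCredentials().then(console.log).catch(console.error);
```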

                                                Returns information about the Snow Family service limit for your account, and also the * number of Snow devices your account has in use.

                                                * - *

                                                The default service limit for the number of Snow devices that you can have at one time is - * 1. If you want to increase your service limit, contact AWS Support.

                                                + *

                                                The default service limit for the number of Snow devices that you can have at one time + * is 1. If you want to increase your service limit, contact Amazon Web Services Support.

                                                */ public getSnowballUsage( args: GetSnowballUsageCommandInput, @@ -763,7 +762,7 @@ export class Snowball extends SnowballClient { /** *

                                                Returns an Amazon S3 presigned URL for an update file associated with a specified - * JobId.

                                                + * JobId.

                                                */ public getSoftwareUpdates( args: GetSoftwareUpdatesCommandInput, @@ -861,10 +860,10 @@ export class Snowball extends SnowballClient { /** *

                                                This action returns a list of the different Amazon EC2 Amazon Machine Images (AMIs) - * that are owned by your AWS account that would be supported for use on a Snow device. - * Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM, Ubuntu Server - * 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the AWS - * Marketplace.

+ * that are owned by your Amazon Web Services account that would be supported for use on a Snow + * device. Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM, + * Ubuntu Server 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the + * Amazon Web Services Marketplace.

                                                */ public listCompatibleImages( args: ListCompatibleImagesCommandInput, diff --git a/clients/client-snowball/src/SnowballClient.ts b/clients/client-snowball/src/SnowballClient.ts index 4f8ac9c6c146..7e8b9429d7f3 100644 --- a/clients/client-snowball/src/SnowballClient.ts +++ b/clients/client-snowball/src/SnowballClient.ts @@ -300,13 +300,12 @@ type SnowballClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHand export interface SnowballClientResolvedConfig extends SnowballClientResolvedConfigType {} /** - *

                                                AWS Snow Family is a petabyte-scale data transport solution that uses secure devices to - * transfer large amounts of data between your on-premises data centers and Amazon Simple Storage - * Service (Amazon S3). The Snow commands described here provide access to the same - * functionality that is available in the AWS Snow Family Management Console, which enables you to - * create and manage jobs for a Snow device. To transfer data locally with a Snow device, you'll - * need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or AWS OpsHub for Snow Family. For more - * information, see the User Guide.

                                                + *

                                                The Amazon Web Services Snow Family provides a petabyte-scale data transport solution that uses + * secure devices to transfer large amounts of data between your on-premises data centers and + * Amazon Simple Storage Service (Amazon S3). The Snow Family commands described here provide access to the same + * functionality that is available in the Amazon Web Services Snow Family Management Console, which enables you to create + * and manage jobs for a Snow Family device. To transfer data locally with a Snow Family device, + * you'll need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or OpsHub for Snow Family. For more information, see the User Guide.

                                                */ export class SnowballClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-snowball/src/commands/CreateJobCommand.ts b/clients/client-snowball/src/commands/CreateJobCommand.ts index 960065531bf9..b74935ab4bcf 100644 --- a/clients/client-snowball/src/commands/CreateJobCommand.ts +++ b/clients/client-snowball/src/commands/CreateJobCommand.ts @@ -20,21 +20,21 @@ export interface CreateJobCommandOutput extends CreateJobResult, __MetadataBeare /** *

                                                Creates a job to import or export data between Amazon S3 and your on-premises data - * center. Your AWS account must have the right trust policies and permissions in place to create - * a job for a Snow device. If you're creating a job for a node in a cluster, you only need to provide - * the clusterId value; the other job attributes are inherited from the cluster. - *

                                                + * center. Your Amazon Web Services account must have the right trust policies and permissions in + * place to create a job for a Snow device. If you're creating a job for a node in a cluster, you + * only need to provide the clusterId value; the other job attributes are inherited + * from the cluster.

                                                * *

                                                Only the Snowball; Edge device type is supported when ordering clustered jobs.

                                                *

                                                The device capacity is optional.

                                                - *

                                                Availability of device types differ by AWS Region. For more information about Region - * availability, see AWS Regional Services.

                                                + *

Availability of device types differs by Amazon Web Services Region. For more information + * about Region availability, see Amazon Web Services Regional Services.

                                                *
                                                * *

                                                * *

                                                - * AWS Snow Family device types and their capacities. + * Snow Family Devices and their capacities. *

                                                *
                                                  *
                                                • @@ -128,7 +128,8 @@ export interface CreateJobCommandOutput extends CreateJobResult, __MetadataBeare *
                                                • *

                                                  Description: Original Snowball device

                                                  * - *

                                                  This device is only available in the Ningxia, Beijing, and Singapore AWS Regions.

                                                  + *

This device is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Regions. + *

                                                  *
                                                  *
                                                • *
                                                @@ -144,7 +145,7 @@ export interface CreateJobCommandOutput extends CreateJobResult, __MetadataBeare *
                                              • *

                                                Description: Original Snowball device

                                                * - *

                                                This device is only available in the Ningxia, Beijing, and Singapore AWS Regions.

                                                + *

This device is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Regions.

                                                *
                                                *
                                              • *
                                              diff --git a/clients/client-snowball/src/commands/CreateLongTermPricingCommand.ts b/clients/client-snowball/src/commands/CreateLongTermPricingCommand.ts index ba3796ad034e..642d7d384239 100644 --- a/clients/client-snowball/src/commands/CreateLongTermPricingCommand.ts +++ b/clients/client-snowball/src/commands/CreateLongTermPricingCommand.ts @@ -23,8 +23,7 @@ export interface CreateLongTermPricingCommandOutput extends CreateLongTermPricin /** *

                                              Creates a job with the long-term usage option for a device. The long-term usage is a - * 1-year or 3-year long-term pricing type for the device. You are billed upfront, and AWS - * provides discounts for long-term pricing. + * 1-year or 3-year long-term pricing type for the device. You are billed upfront, and Amazon Web Services provides discounts for long-term pricing. *

                                              * @example * Use a bare-bones client and the command you need to make an API call. diff --git a/clients/client-snowball/src/commands/CreateReturnShippingLabelCommand.ts b/clients/client-snowball/src/commands/CreateReturnShippingLabelCommand.ts index 264b296e6bae..703f3e33b1a6 100644 --- a/clients/client-snowball/src/commands/CreateReturnShippingLabelCommand.ts +++ b/clients/client-snowball/src/commands/CreateReturnShippingLabelCommand.ts @@ -22,7 +22,7 @@ export interface CreateReturnShippingLabelCommandInput extends CreateReturnShipp export interface CreateReturnShippingLabelCommandOutput extends CreateReturnShippingLabelResult, __MetadataBearer {} /** - *

                                              Creates a shipping label that will be used to return the Snow device to AWS.

                                              + *

                                              Creates a shipping label that will be used to return the Snow device to Amazon Web Services.

                                              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-snowball/src/commands/DescribeReturnShippingLabelCommand.ts b/clients/client-snowball/src/commands/DescribeReturnShippingLabelCommand.ts index b86ae88f837f..855931865381 100644 --- a/clients/client-snowball/src/commands/DescribeReturnShippingLabelCommand.ts +++ b/clients/client-snowball/src/commands/DescribeReturnShippingLabelCommand.ts @@ -22,7 +22,7 @@ export interface DescribeReturnShippingLabelCommandInput extends DescribeReturnS export interface DescribeReturnShippingLabelCommandOutput extends DescribeReturnShippingLabelResult, __MetadataBearer {} /** - *

                                              Information on the shipping label of a Snow device that is being returned to AWS.

                                              + *

                                              Information on the shipping label of a Snow device that is being returned to Amazon Web Services.

                                              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-snowball/src/commands/GetJobManifestCommand.ts b/clients/client-snowball/src/commands/GetJobManifestCommand.ts index 318859e25d14..ffe8ad4cb431 100644 --- a/clients/client-snowball/src/commands/GetJobManifestCommand.ts +++ b/clients/client-snowball/src/commands/GetJobManifestCommand.ts @@ -29,7 +29,7 @@ export interface GetJobManifestCommandOutput extends GetJobManifestResult, __Met * *

                                              The manifest is an encrypted file that you can download after your job enters the * WithCustomer status. The manifest is decrypted by using the - * UnlockCode code value, when you pass both values to the Snow device through the + * UnlockCode code value, when you pass both values to the Snow device through the * Snowball client when the client is started for the first time.

                                              * * diff --git a/clients/client-snowball/src/commands/GetJobUnlockCodeCommand.ts b/clients/client-snowball/src/commands/GetJobUnlockCodeCommand.ts index 289fdd8bee5d..640bfcbc2ad1 100644 --- a/clients/client-snowball/src/commands/GetJobUnlockCodeCommand.ts +++ b/clients/client-snowball/src/commands/GetJobUnlockCodeCommand.ts @@ -28,13 +28,13 @@ export interface GetJobUnlockCodeCommandOutput extends GetJobUnlockCodeResult, _ * *

                                              The UnlockCode value is a 29-character code with 25 alphanumeric * characters and 4 hyphens. This code is used to decrypt the manifest file when it is passed - * along with the manifest to the Snow device through the Snowball client when the client is started - * for the first time.

                                              + * along with the manifest to the Snow device through the Snowball client when the client is + * started for the first time.

                                              * *

                                              As a best practice, we recommend that you don't save a copy of the * UnlockCode in the same location as the manifest file for that job. Saving these - * separately helps prevent unauthorized parties from gaining access to the Snow device associated - * with that job.

                                              + * separately helps prevent unauthorized parties from gaining access to the Snow device + * associated with that job.

                                              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-snowball/src/commands/GetSnowballUsageCommand.ts b/clients/client-snowball/src/commands/GetSnowballUsageCommand.ts index 06792b50ba23..2cdc9867708d 100644 --- a/clients/client-snowball/src/commands/GetSnowballUsageCommand.ts +++ b/clients/client-snowball/src/commands/GetSnowballUsageCommand.ts @@ -25,8 +25,8 @@ export interface GetSnowballUsageCommandOutput extends GetSnowballUsageResult, _ *

                                              Returns information about the Snow Family service limit for your account, and also the * number of Snow devices your account has in use.

                                              * - *

                                              The default service limit for the number of Snow devices that you can have at one time is - * 1. If you want to increase your service limit, contact AWS Support.

                                              + *

                                              The default service limit for the number of Snow devices that you can have at one time + * is 1. If you want to increase your service limit, contact Amazon Web Services Support.

                                              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-snowball/src/commands/GetSoftwareUpdatesCommand.ts b/clients/client-snowball/src/commands/GetSoftwareUpdatesCommand.ts index 9d0cfb8314ff..1a2d004785d6 100644 --- a/clients/client-snowball/src/commands/GetSoftwareUpdatesCommand.ts +++ b/clients/client-snowball/src/commands/GetSoftwareUpdatesCommand.ts @@ -23,7 +23,7 @@ export interface GetSoftwareUpdatesCommandOutput extends GetSoftwareUpdatesResul /** *

                                              Returns an Amazon S3 presigned URL for an update file associated with a specified - * JobId.

                                              + * JobId.

                                              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-snowball/src/commands/ListCompatibleImagesCommand.ts b/clients/client-snowball/src/commands/ListCompatibleImagesCommand.ts index c79a4185b91c..5503accda093 100644 --- a/clients/client-snowball/src/commands/ListCompatibleImagesCommand.ts +++ b/clients/client-snowball/src/commands/ListCompatibleImagesCommand.ts @@ -23,10 +23,10 @@ export interface ListCompatibleImagesCommandOutput extends ListCompatibleImagesR /** *

                                              This action returns a list of the different Amazon EC2 Amazon Machine Images (AMIs) - * that are owned by your AWS account that would be supported for use on a Snow device. - * Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM, Ubuntu Server - * 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the AWS - * Marketplace.

+ * that are owned by your Amazon Web Services account that would be supported for use on a Snow + * device. Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM, + * Ubuntu Server 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the + * Amazon Web Services Marketplace.

                                              * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-snowball/src/models/models_0.ts b/clients/client-snowball/src/models/models_0.ts index 566c30df54ea..100660a89c0e 100644 --- a/clients/client-snowball/src/models/models_0.ts +++ b/clients/client-snowball/src/models/models_0.ts @@ -1,9 +1,9 @@ import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; /** - *

                                              The address that you want the Snow device(s) associated with a specific job to - * be shipped to. Addresses are validated at the time of creation. The address you provide must - * be located within the serviceable area of your region. Although no individual elements of the + *

                                              The address that you want the Snow device(s) associated with a specific job to be + * shipped to. Addresses are validated at the time of creation. The address you provide must be + * located within the serviceable area of your region. Although no individual elements of the * Address are required, if the address is invalid or unsupported, then an * exception is thrown.

                                              */ @@ -164,8 +164,8 @@ export namespace InvalidResourceException { } /** - *

                                              The provided AWS Key Management Service key lacks the permissions to perform the - * specified CreateJob or UpdateJob action.

                                              + *

                                              The provided Key Management Service key lacks the permissions to perform the specified + * CreateJob or UpdateJob action.

                                              */ export interface KMSRequestFailedException extends __SmithyException, $MetadataBearer { name: "KMSRequestFailedException"; @@ -229,7 +229,8 @@ export namespace CreateAddressRequest { export interface CreateAddressResult { /** *

                                              The automatically generated ID for a specific address. You'll use this ID when you - * create a job to specify which address you want the Snow device for that job shipped to.

                                              + * create a job to specify which address you want the Snow device for that job shipped + * to.

                                              */ AddressId?: string; } @@ -265,7 +266,7 @@ export namespace InvalidAddressException { /** *

                                              The address is either outside the serviceable area for your region, or an error * occurred. Check the address with your region's carrier and try again. If the issue persists, - * contact AWS Support.

                                              + * contact Amazon Web Services Support.

                                              */ export interface UnsupportedAddressException extends __SmithyException, $MetadataBearer { name: "UnsupportedAddressException"; @@ -321,8 +322,8 @@ export interface Notification { * create Amazon Resource Names (ARNs) for topics by using the CreateTopic Amazon SNS API * action.

                                              * - *

                                              You can subscribe email addresses to an Amazon SNS topic through the AWS Management - * Console, or by using the Subscribe Amazon Simple Notification Service (Amazon SNS) API action.

                                              + *

                                              You can subscribe email addresses to an Amazon SNS topic through the Amazon Web Services Management Console, or by using the Subscribe Amazon Simple Notification + * Service (Amazon SNS) API action.

                                              */ SnsTopicARN?: string; @@ -351,11 +352,12 @@ export enum StorageUnit { } /** - *

                                              An object that represents metadata and configuration settings for NFS service on an AWS Snow Family device.

                                              + *

                                              An object that represents the metadata and configuration settings for the NFS (Network + * File System) service on an Amazon Web Services Snow Family device.

                                              */ export interface NFSOnDeviceServiceConfiguration { /** - *

                                              The maximum NFS storage for one Snowball Family device.

                                              + *

                                              The maximum NFS storage for one Snow Family device.

                                              */ StorageLimit?: number; @@ -376,13 +378,45 @@ export namespace NFSOnDeviceServiceConfiguration { } /** - *

                                              An object that represents metadata and configuration settings for services on an AWS Snow Family device.

                                              + *

                                              An object that represents the metadata and configuration settings for the Storage Gateway + * service Tape Gateway type on an Amazon Web Services Snow Family device.

                                              + */ +export interface TGWOnDeviceServiceConfiguration { + /** + *

                                              The maximum number of virtual tapes to store on one Snow Family device. Due to physical + * resource limitations, this value must be set to 80 for Snowball Edge.

                                              + */ + StorageLimit?: number; + + /** + *

                                              The scale unit of the virtual tapes on the device.

                                              + */ + StorageUnit?: StorageUnit | string; +} + +export namespace TGWOnDeviceServiceConfiguration { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TGWOnDeviceServiceConfiguration): any => ({ + ...obj, + }); +} + +/** + *

                                              An object that represents the metadata and configuration settings for services on an Amazon Web Services + * Snow Family device.

                                              */ export interface OnDeviceServiceConfiguration { /** - *

                                              Represents the NFS service on a Snow Family device.

                                              + *

                                              Represents the NFS (Network File System) service on a Snow Family device.

                                              */ NFSOnDeviceService?: NFSOnDeviceServiceConfiguration; + + /** + *

                                              Represents the Storage Gateway service Tape Gateway type on a Snow Family device.

                                              + */ + TGWOnDeviceService?: TGWOnDeviceServiceConfiguration; } export namespace OnDeviceServiceConfiguration { @@ -402,7 +436,7 @@ export enum RemoteManagement { /** *
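The new TGWOnDeviceServiceConfiguration slots into OnDeviceServiceConfiguration alongside the existing NFS settings. A hedged sketch of a CreateJob call that requests the Tape Gateway service; the ARNs, address ID, and bucket are placeholders:

```ts
import { SnowballClient, CreateJobCommand } from "@aws-sdk/client-snowball";

const snowball = new SnowballClient({ region: "us-west-2" });

async function createTapeGatewayJob() {
  const result = await snowball.send(
    new CreateJobCommand({
      JobType: "IMPORT",
      SnowballType: "EDGE",
      AddressId: "ADID1234ab12-3eec-4eb3-9be6-9374c10eb51b", // placeholder
      RoleARN: "arn:aws:iam::123456789012:role/snowball-import-role", // placeholder
      Resources: { S3Resources: [{ BucketArn: "arn:aws:s3:::example-bucket" }] }, // placeholder
      ShippingOption: "SECOND_DAY",
      OnDeviceServiceConfiguration: {
        // Per the doc comment above, the virtual-tape limit must be 80 for Snowball Edge.
        TGWOnDeviceService: { StorageLimit: 80, StorageUnit: "TB" },
      },
    })
  );
  console.log("created job:", result.JobId);
}

createTapeGatewayJob().catch(console.error);
```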

                                              A JSON-formatted object that contains the IDs for an Amazon Machine Image (AMI), * including the Amazon EC2 AMI ID and the Snow device AMI ID. Each AMI has these two IDs to - * simplify identifying the AMI in both the AWS Cloud and on the device.

                                              + * simplify identifying the AMI in both the Amazon Web Services Cloud and on the device.

                                              */ export interface Ec2AmiResource { /** @@ -430,8 +464,7 @@ export namespace Ec2AmiResource { */ export interface EventTriggerDefinition { /** - *

                                              The Amazon Resource Name (ARN) for any local Amazon S3 resource that is an AWS Lambda - * function's event trigger associated with this job.

                                              + *

The Amazon Resource Name (ARN) for any local Amazon S3 resource that is a Lambda function's event trigger associated with this job.

                                              */ EventResourceARN?: string; } @@ -450,8 +483,8 @@ export namespace EventTriggerDefinition { */ export interface LambdaResource { /** - *

                                              An Amazon Resource Name (ARN) that represents an AWS Lambda function to be triggered by - * PUT object actions on the associated local Amazon S3 resource.

                                              + *

An Amazon Resource Name (ARN) that represents a Lambda function to be + * triggered by PUT object actions on the associated local Amazon S3 resource.

                                              */ LambdaArn?: string; @@ -512,17 +545,19 @@ export enum TransferOption { /** *

                                              An object that represents the service or services on the Snow Family device that your - * transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System).

                                              + * transferred data will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and + * NFS (Network File System).

                                              */ export interface TargetOnDeviceService { /** - *

                                              Specifies the name of the service on the Snow Family device that your - * transferred data will be exported from or imported into.

                                              + *

                                              Specifies the name of the service on the Snow Family device that your transferred data + * will be exported from or imported into.

                                              */ ServiceName?: DeviceServiceName | string; /** - *

                                              Specifies whether the data is being imported or exported. You can import or export the data, or use it locally on the device.

                                              + *

                                              Specifies whether the data is being imported or exported. You can import or export the + * data, or use it locally on the device.

                                              */ TransferOption?: TransferOption | string; } @@ -558,8 +593,9 @@ export interface S3Resource { KeyRange?: KeyRange; /** - *

                                              Specifies the service or services on the Snow Family device that your - * transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System).

                                              + *

                                              Specifies the service or services on the Snow Family device that your transferred data + * will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File + * System).

                                              */ TargetOnDeviceServices?: TargetOnDeviceService[]; } @@ -574,9 +610,9 @@ export namespace S3Resource { } /** - *

                                              Contains an array of AWS resource objects. Each object represents an Amazon S3 bucket, - * an AWS Lambda function, or an Amazon Machine Image (AMI) based on Amazon EC2 that is - * associated with a particular job.

                                              + *

Contains an array of Amazon Web Services resource objects. Each object represents an + * Amazon S3 bucket, a Lambda function, or an Amazon Machine Image (AMI) based + * on Amazon EC2 that is associated with a particular job.

                                              */ export interface JobResource { /** @@ -622,11 +658,12 @@ export enum SnowballType { } /** - *

                                              The tax documents required in AWS Regions in India.

                                              + *

The tax documents required in Amazon Web Services Regions in India.

                                              */ export interface INDTaxDocuments { /** - *

                                              The Goods and Services Tax (GST) documents required in AWS Regions in India.

                                              + *

The Goods and Services Tax (GST) documents required in Amazon Web Services Regions in + * India.

                                              */ GSTIN?: string; } @@ -641,11 +678,11 @@ export namespace INDTaxDocuments { } /** - *

                                              The tax documents required in your AWS Region.

                                              + *

                                              The tax documents required in your Amazon Web Services Region.

                                              */ export interface TaxDocuments { /** - *

                                              The tax documents required in AWS Regions in India.

                                              + *

The tax documents required in Amazon Web Services Regions in India.

                                              */ IND?: INDTaxDocuments; } @@ -674,13 +711,15 @@ export interface CreateClusterRequest { /** *

                                              The resources associated with the cluster job. These resources include Amazon S3 - * buckets and optional AWS Lambda functions written in the Python language.

                                              + * buckets and optional Lambda functions written in the Python language. + *

                                              */ Resources: JobResource | undefined; /** - *

                                              Specifies the service or services on the Snow Family device that your - * transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System).

                                              + *

                                              Specifies the service or services on the Snow Family device that your transferred data + * will be exported from or imported into. Amazon Web Services Snow Family device clusters support Amazon S3 and NFS + * (Network File System).

                                              */ OnDeviceServiceConfiguration?: OnDeviceServiceConfiguration; @@ -697,23 +736,21 @@ export interface CreateClusterRequest { /** *

                                              The KmsKeyARN value that you want to associate with this cluster. - * KmsKeyARN values are created by using the CreateKey API action in AWS Key - * Management Service (AWS KMS).

                                              + * KmsKeyARN values are created by using the CreateKey API action in Key Management Service (KMS).

                                              */ KmsKeyARN?: string; /** *

                                              The RoleARN that you want to associate with this cluster. - * RoleArn values are created by using the CreateRole API action in AWS - * Identity and Access Management (IAM).

                                              + * RoleArn values are created by using the CreateRole API action in Identity and Access Management (IAM).

                                              */ RoleARN: string | undefined; /** - *

                                              The type of AWS Snow Family device to use for this cluster. - *

                                              + *

                                              The type of Snow Family Devices to use for this cluster.

                                              * - *

                                              For cluster jobs, AWS Snow Family currently supports only the EDGE device type.

                                              + *

                                              For cluster jobs, Amazon Web Services Snow Family currently supports only the + * EDGE device type.

                                              *
                                              * *

                                              For more information, see @@ -727,16 +764,15 @@ export interface CreateClusterRequest { /** *

                                              The shipping speed for each node in this cluster. This speed doesn't dictate how soon * you'll get each Snowball Edge device, rather it represents how quickly each device moves to - * its destination while in transit. Regional shipping speeds are as follows: - *

                                              + * its destination while in transit. Regional shipping speeds are as follows:

                                              *
                                                *
                                              • *

                                                In Australia, you have access to express shipping. Typically, Snow devices shipped * express are delivered in about a day.

                                                *
                                              • *
                                              • - *

                                                In the European Union (EU), you have access to express shipping. Typically, - * Snow devices shipped express are delivered in about a day. In addition, most countries in the + *

                                                In the European Union (EU), you have access to express shipping. Typically, Snow + * devices shipped express are delivered in about a day. In addition, most countries in the * EU have access to standard shipping, which typically takes less than a week, one * way.

                                                *
                                              • @@ -755,9 +791,9 @@ export interface CreateClusterRequest { * express are delivered in about a day.

                                                * *
                                              • - *

                                                In the European Union (EU), you have access to express shipping. Typically, - * Snow devices shipped express are delivered in about a day. In addition, most countries - * in the EU have access to standard shipping, which typically takes less than a week, one + *

                                                In the European Union (EU), you have access to express shipping. Typically, Snow + * devices shipped express are delivered in about a day. In addition, most countries in the + * EU have access to standard shipping, which typically takes less than a week, one * way.

                                                *
                                              • *
                                              • @@ -783,7 +819,7 @@ export interface CreateClusterRequest { ForwardingAddressId?: string; /** - *

                                                The tax documents required in your AWS Region.

                                                + *

                                                The tax documents required in your Amazon Web Services Region.

                                                */ TaxDocuments?: TaxDocuments; @@ -841,8 +877,7 @@ export namespace Ec2RequestFailedException { } /** - *

                                                Job or cluster creation failed. One or more inputs were invalid. Confirm that the - * CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType, and try again.

                                                + *

                                                Job or cluster creation failed. One or more inputs were invalid. Confirm that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType, and try again.

                                                */ export interface InvalidInputCombinationException extends __SmithyException, $MetadataBearer { name: "InvalidInputCombinationException"; @@ -880,11 +915,11 @@ export namespace ClusterLimitExceededException { } /** - *

                                                Configures the wireless connection on an AWS Snowcone device.

                                                + *

Configures the wireless connection on a Snowcone device.

                                                */ export interface WirelessConnection { /** - *

                                                Enables the Wi-Fi adapter on an AWS Snowcone device.

                                                + *

Enables the Wi-Fi adapter on a Snowcone device.

                                                */ IsWifiEnabled?: boolean; } @@ -899,11 +934,11 @@ export namespace WirelessConnection { } /** - *

                                                Specifies the device configuration for an AWS Snowcone job.

                                                + *

Specifies the device configuration for a Snowcone job.

                                                */ export interface SnowconeDeviceConfiguration { /** - *

                                                Configures the wireless connection for the AWS Snowcone device.

                                                + *

                                                Configures the wireless connection for the Snowcone device.

                                                */ WirelessConnection?: WirelessConnection; } @@ -922,7 +957,8 @@ export namespace SnowconeDeviceConfiguration { */ export interface DeviceConfiguration { /** - *

                                                Returns information about the device configuration for an AWS Snowcone job.

                                                + *

Returns information about the device configuration for a Snowcone + * job.

                                                */ SnowconeDeviceConfiguration?: SnowconeDeviceConfiguration; } @@ -969,8 +1005,9 @@ export interface CreateJobRequest { Resources?: JobResource; /** - *

                                                Specifies the service or services on the Snow Family device that your - * transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System).

                                                + *

                                                Specifies the service or services on the Snow Family device that your transferred data + * will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File + * System) and the Amazon Web Services Storage Gateway service Tape Gateway type.

                                                */ OnDeviceServiceConfiguration?: OnDeviceServiceConfiguration; @@ -987,15 +1024,15 @@ export interface CreateJobRequest { /** *

                                                The KmsKeyARN that you want to associate with this job. - * KmsKeyARNs are created using the CreateKey AWS Key Management - * Service (KMS) API action.

                                                + * KmsKeyARNs are created using the CreateKey + * Key Management Service (KMS) API action.

                                                */ KmsKeyARN?: string; /** *

                                                The RoleARN that you want to associate with this job. - * RoleArns are created using the CreateRole AWS Identity and - * Access Management (IAM) API action.

                                                + * RoleArns are created using the CreateRole + * Identity and Access Management (IAM) API action.

                                                */ RoleARN?: string; @@ -1014,8 +1051,8 @@ export interface CreateJobRequest { /** *

                                                The shipping speed for this job. This speed doesn't dictate how soon you'll get the - * Snow device, rather it represents how quickly the Snow device moves to its destination while in - * transit. Regional shipping speeds are as follows:

                                                + * Snow device, rather it represents how quickly the Snow device moves to its destination while + * in transit. Regional shipping speeds are as follows:

                                                * *
                                                  *
                                                • @@ -1023,8 +1060,8 @@ export interface CreateJobRequest { * express are delivered in about a day.

                                                  *
                                                • *
                                                • - *

                                                  In the European Union (EU), you have access to express shipping. Typically, - * Snow devices shipped express are delivered in about a day. In addition, most countries in the + *

                                                  In the European Union (EU), you have access to express shipping. Typically, Snow + * devices shipped express are delivered in about a day. In addition, most countries in the * EU have access to standard shipping, which typically takes less than a week, one * way.

                                                  *
                                                • @@ -1052,15 +1089,16 @@ export interface CreateJobRequest { ClusterId?: string; /** - *

                                                  The type of AWS Snow Family device to use for this job. + *

                                                  The type of Snow Family Devices to use for this job. *

                                                  * - *

                                                  For cluster jobs, AWS Snow Family currently supports only the EDGE device type.

                                                  + *

                                                  For cluster jobs, Amazon Web Services Snow Family currently supports only the + * EDGE device type.

                                                  *
                                                  - *

                                                  The type of AWS Snow device to use for this job. Currently, the only supported - * device type for cluster jobs is EDGE.

                                                  + *

                                                  The type of Amazon Web Services Snow device to use for this job. Currently, the only + * supported device type for cluster jobs is EDGE.

                                                  *

                                                  For more information, see Snowball Edge Device - * Options in the Snowball Edge Developer Guide.

                                                  + * Options in the Snowball Edge Developer Guide.

                                                  * *

                                                  For more information, see * "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow @@ -1077,12 +1115,12 @@ export interface CreateJobRequest { ForwardingAddressId?: string; /** - *

                                                  The tax documents required in your AWS Region.

                                                  + *

                                                  The tax documents required in your Amazon Web Services Region.

                                                  */ TaxDocuments?: TaxDocuments; /** - *

                                                  Defines the device configuration for an AWS Snowcone job.

                                                  + *

Defines the device configuration for a Snowcone job.

                                                  * *

                                                  For more information, see * "https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html" (Snow @@ -1145,12 +1183,13 @@ export interface CreateLongTermPricingRequest { LongTermPricingType: LongTermPricingType | string | undefined; /** - *

                                                  Specifies whether the current long-term pricing type for the device should be renewed.

                                                  + *

                                                  Specifies whether the current long-term pricing type for the device should be + * renewed.

                                                  */ IsLongTermPricingAutoRenew?: boolean; /** - *

                                                  The type of AWS Snow Family device to use for the long-term pricing job.

                                                  + *

                                                  The type of Snow Family Devices to use for the long-term pricing job.

                                                  */ SnowballType?: SnowballType | string; } @@ -1181,7 +1220,8 @@ export namespace CreateLongTermPricingResult { } /** - *

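As a rough illustration of how the CreateLongTermPricingRequest shape above might be exercised from the generated client, the following sketch assumes the @aws-sdk/client-snowball package, configured credentials, and illustrative values for the pricing and device types (the exact enum strings are not shown in this hunk):

// Sketch only: parameter values are illustrative, not taken from the diff.
import { SnowballClient, CreateLongTermPricingCommand } from "@aws-sdk/client-snowball";

const snowball = new SnowballClient({ region: "us-west-2" });

async function createPricing(): Promise<void> {
  // IsLongTermPricingAutoRenew controls whether the current long-term pricing
  // type is renewed, per the documentation above.
  const result = await snowball.send(
    new CreateLongTermPricingCommand({
      LongTermPricingType: "OneYear",      // assumed value; accepts LongTermPricingType | string
      IsLongTermPricingAutoRenew: true,
      SnowballType: "EDGE",                // EDGE is the type called out in the docs above
    })
  );
  console.log("Created long-term pricing:", result);
}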
@@ -1181,7 +1220,8 @@ export namespace CreateLongTermPricingResult {
 }

 /**
- * You get this exception when you call CreateReturnShippingLabel more than once when other requests are not completed.
+ * You get this exception when you call CreateReturnShippingLabel more than once
+ * when other requests are not completed.
  */
 export interface ConflictException extends __SmithyException, $MetadataBearer {
   name: "ConflictException";
@@ -1207,8 +1247,8 @@ export interface CreateReturnShippingLabelRequest {
   JobId: string | undefined;

   /**
-   * The shipping speed for a particular job. This speed doesn't dictate how soon the device is returned to AWS.
-   * This speed represents how quickly it moves to its
+   * The shipping speed for a particular job. This speed doesn't dictate how soon the device
+   * is returned to Amazon Web Services. This speed represents how quickly it moves to its
    * destination while in transit. Regional shipping speeds are as follows:
    */
   ShippingOption?: ShippingOption | string;
@@ -1232,7 +1272,7 @@ export enum ShippingLabelStatus {

 export interface CreateReturnShippingLabelResult {
   /**
-   * The status information of the task on a Snow device that is being returned to AWS.
+   * The status information of the task on a Snow device that is being returned to Amazon Web Services.
    */
   Status?: ShippingLabelStatus | string;
 }
@@ -1247,9 +1287,9 @@ export namespace CreateReturnShippingLabelResult {
 }

 /**
- * You get this exception if you call CreateReturnShippingLabel and a valid return
- * shipping label already exists. In this case, use
- * DescribeReturnShippingLabel to get the url.
+ * You get this exception if you call CreateReturnShippingLabel and a valid
+ * return shipping label already exists. In this case, use
+ * DescribeReturnShippingLabel to get the URL.
  */
 export interface ReturnShippingLabelAlreadyExistsException extends __SmithyException, $MetadataBearer {
   name: "ReturnShippingLabelAlreadyExistsException";
@@ -1284,8 +1324,8 @@ export namespace DescribeAddressRequest {

 export interface DescribeAddressResult {
   /**
-   * The address that you want the Snow device(s) associated with a specific job to
-   * be shipped to.
+   * The address that you want the Snow device(s) associated with a specific job to be
+   * shipped to.
    */
   Address?: Address;
 }
@@ -1405,14 +1445,13 @@ export interface ClusterMetadata {
   /**
    * The KmsKeyARN Amazon Resource Name (ARN) associated with this cluster.
-   * This ARN was created using the CreateKey API action in AWS Key
-   * Management Service (AWS KMS).
+   * This ARN was created using the CreateKey API action in Key Management Service (KMS.
    */
   KmsKeyARN?: string;

   /**
    * The role ARN associated with this cluster. This ARN was created using the CreateRole
-   * API action in AWS Identity and Access Management (IAM).
+   * API action in Identity and Access Management (IAM).
    */
   RoleARN?: string;
@@ -1428,10 +1467,11 @@ export interface ClusterMetadata {
   JobType?: JobType | string;

   /**
-   * The type of AWS Snow device to use for this cluster.
+   * The type of Snowcone device to use for this cluster.
    *
-   * For cluster jobs, AWS Snow Family currently supports only the EDGE device type.
+   * For cluster jobs, Amazon Web Services Snow Family currently supports only the
+   * EDGE device type.
    */
   SnowballType?: SnowballType | string;
@@ -1453,8 +1493,8 @@ export interface ClusterMetadata {
   /**
    * The shipping speed for each node in this cluster. This speed doesn't dictate how soon
-   * you'll get each device, rather it represents how quickly each device moves to
-   * its destination while in transit. Regional shipping speeds are as follows:
+   * you'll get each device, rather it represents how quickly each device moves to its destination
+   * while in transit. Regional shipping speeds are as follows:
    *
@@ -1462,9 +1502,9 @@ export interface ClusterMetadata {
    *          express are delivered in about a day.
    *
-   *          In the European Union (EU), you have access to express shipping. Typically,
-   *          Snow devices shipped express are delivered in about a day. In addition, most countries
-   *          in the EU have access to standard shipping, which typically takes less than a week, one
+   *          In the European Union (EU), you have access to express shipping. Typically, Snow
+   *          devices shipped express are delivered in about a day. In addition, most countries in the
+   *          EU have access to standard shipping, which typically takes less than a week, one
    *          way.
    *
@@ -1490,12 +1530,13 @@ export interface ClusterMetadata {
   ForwardingAddressId?: string;

   /**
-   * The tax documents required in your AWS Region.
+   * The tax documents required in your Amazon Web Services Region.
    */
   TaxDocuments?: TaxDocuments;

   /**
-   * Represents metadata and configuration settings for services on an AWS Snow Family device.
+   * Represents metadata and configuration settings for services on an Amazon Web Services Snow Family
+   * device.
    */
   OnDeviceServiceConfiguration?: OnDeviceServiceConfiguration;
 }
@@ -1544,9 +1585,9 @@ export namespace DescribeJobRequest {
 }

 /**
- * Defines the real-time status of a Snow device's data transfer while the device is at AWS.
- * This data is only available while a job has a JobState value of
- * InProgress, for both import and export jobs.
+ * Defines the real-time status of a Snow device's data transfer while the device is at
+ * Amazon Web Services. This data is only available while a job has a JobState
+ * value of InProgress, for both import and export jobs.
  */
 export interface DataTransfer {
   /**
@@ -1560,14 +1601,15 @@ export interface DataTransfer {
   ObjectsTransferred?: number;

   /**
-   * The total bytes of data for a transfer between a Snow device and Amazon S3. This value is
-   * set to 0 (zero) until all the keys that will be transferred have been listed.
+   * The total bytes of data for a transfer between a Snow device and Amazon S3. This value
+   * is set to 0 (zero) until all the keys that will be transferred have been listed.
    */
   TotalBytes?: number;

   /**
-   * The total number of objects for a transfer between a Snow device and Amazon S3. This value
-   * is set to 0 (zero) until all the keys that will be transferred have been listed.
+   * The total number of objects for a transfer between a Snow device and Amazon S3. This
+   * value is set to 0 (zero) until all the keys that will be transferred have been
+   * listed.
    */
   TotalObjects?: number;
 }
@@ -1582,16 +1624,16 @@ export namespace DataTransfer {
 }

 /**
- * Contains job logs. Whenever a Snow device is used to import data into or export data out of
- * Amazon S3, you'll have the option of downloading a PDF job report. Job logs are returned as a
- * part of the response syntax of the DescribeJob action in the
+ * Contains job logs. Whenever a Snow device is used to import data into or export data
+ * out of Amazon S3, you'll have the option of downloading a PDF job report. Job logs are
+ * returned as a part of the response syntax of the DescribeJob action in the
  * JobMetadata data type. The job logs can be accessed for up to 60 minutes after
  * this request has been made. To access any of the job logs after 60 minutes have passed, you'll
  * have to make another call to the DescribeJob action.
  *
  * For import jobs, the PDF job report becomes available at the end of the import process.
- * For export jobs, your job report typically becomes available while the Snow device for your job
- * part is being delivered to you.
+ * For export jobs, your job report typically becomes available while the Snow device for your
+ * job part is being delivered to you.
  *
  * The job report provides you insight into the state of your Amazon S3 data transfer. The
  * report includes details about your job or job part for your records.
@@ -1663,8 +1705,8 @@ export namespace Shipment {

 export interface ShippingDetails {
   /**
    * The shipping speed for a particular job. This speed doesn't dictate how soon you'll get
-   * the Snow device from the job's creation date. This speed represents how quickly it moves to its
-   * destination while in transit. Regional shipping speeds are as follows:
+   * the Snow device from the job's creation date. This speed represents how quickly it moves to
+   * its destination while in transit. Regional shipping speeds are as follows:
    *
@@ -1672,8 +1714,8 @@ export interface ShippingDetails {
    *          express are delivered in about a day.
    *
-   *          In the European Union (EU), you have access to express shipping. Typically,
-   *          Snow devices shipped express are delivered in about a day. In addition, most countries in the
+   *          In the European Union (EU), you have access to express shipping. Typically, Snow
+   *          devices shipped express are delivered in about a day. In addition, most countries in the
    *          EU have access to standard shipping, which typically takes less than a week, one
    *          way.
    *
@@ -1690,7 +1732,7 @@ export interface ShippingDetails {
   /**
    * The Status and TrackingNumber values for a Snow device being
-   * returned to AWS for a particular job.
+   * returned to Amazon Web Services for a particular job.
    */
   InboundShipment?: Shipment;
@@ -1755,15 +1797,14 @@ export interface JobMetadata {
   Description?: string;

   /**
-   * The Amazon Resource Name (ARN) for the AWS Key Management Service (AWS KMS) key
-   * associated with this job. This ARN was created using the CreateKey API action in AWS
-   * KMS.
+   * The Amazon Resource Name (ARN) for the Key Management Service (KMS) key
+   * associated with this job. This ARN was created using the CreateKey API action in KMS.
    */
   KmsKeyARN?: string;

   /**
    * The role ARN associated with this job. This ARN was created using the CreateRole
-   * API action in AWS Identity and Access Management (IAM).
+   * API action in Identity and Access Management.
    */
   RoleARN?: string;
@@ -1801,16 +1842,17 @@ export interface JobMetadata {
   /**
    * A value that defines the real-time status of a Snow device's data transfer while the
-   * device is at AWS. This data is only available while a job has a JobState value of
-   * InProgress, for both import and export jobs.
+   * device is at Amazon Web Services. This data is only available while a job has a
+   * JobState value of InProgress, for both import and export
+   * jobs.
    */
   DataTransferProgress?: DataTransfer;

   /**
    * Links to Amazon S3 presigned URLs for the job report and logs. For import jobs, the PDF
    * job report becomes available at the end of the import process. For export jobs, your job
-   * report typically becomes available while the Snow device for your job part is being delivered to
-   * you.
+   * report typically becomes available while the Snow device for your job part is being delivered
+   * to you.
    */
   JobLogInfo?: JobLogs;
@@ -1827,7 +1869,7 @@ export interface JobMetadata {
   ForwardingAddressId?: string;

   /**
-   * The metadata associated with the tax documents required in your AWS Region.
+   * The metadata associated with the tax documents required in your Amazon Web Services Region.
    */
   TaxDocuments?: TaxDocuments;
@@ -1850,7 +1892,8 @@ export interface JobMetadata {
   LongTermPricingId?: string;

   /**
-   * Represents metadata and configuration settings for services on an AWS Snow Family device.
+   * Represents metadata and configuration settings for services on an Amazon Web Services Snow Family
+   * device.
    */
   OnDeviceServiceConfiguration?: OnDeviceServiceConfiguration;
 }
@@ -1890,7 +1933,7 @@ export namespace DescribeJobResult {

 export interface DescribeReturnShippingLabelRequest {
   /**
    * The automatically generated ID for a job, for example
-   * JID123e4567-e89b-12d3-a456-426655440000.
+   * JID123e4567-e89b-12d3-a456-426655440000.
    */
   JobId: string | undefined;
 }
@@ -1906,7 +1949,7 @@ export namespace DescribeReturnShippingLabelRequest {
 }

 export interface DescribeReturnShippingLabelResult {
   /**
-   * The status information of the task on a Snow device that is being returned to AWS.
+   * The status information of the task on a Snow device that is being returned to Amazon Web Services.
    */
   Status?: ShippingLabelStatus | string;
@@ -1914,6 +1957,11 @@ export interface DescribeReturnShippingLabelResult {
    * The expiration date of the current return shipping label.
    */
   ExpirationDate?: Date;
+
+  /**
+   * The pre-signed Amazon S3 URI used to download the return shipping label.
+   */
+  ReturnShippingLabelURI?: string;
 }

 export namespace DescribeReturnShippingLabelResult {
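The hunk above adds a ReturnShippingLabelURI field to DescribeReturnShippingLabelResult. A minimal sketch of consuming it, assuming @aws-sdk/client-snowball and a placeholder job ID, and falling back to DescribeReturnShippingLabel when a valid label already exists (as the ReturnShippingLabelAlreadyExistsException documentation above suggests):

// Sketch only: the JobId is a placeholder taken from the documentation example above.
import {
  SnowballClient,
  CreateReturnShippingLabelCommand,
  DescribeReturnShippingLabelCommand,
} from "@aws-sdk/client-snowball";

const client = new SnowballClient({ region: "us-west-2" });
const JobId = "JID123e4567-e89b-12d3-a456-426655440000";

async function getReturnLabelUri(): Promise<string | undefined> {
  try {
    await client.send(new CreateReturnShippingLabelCommand({ JobId }));
  } catch (err) {
    // A valid label may already exist; in that case describe it instead of failing.
    if ((err as { name?: string }).name !== "ReturnShippingLabelAlreadyExistsException") {
      throw err;
    }
  }
  const { Status, ReturnShippingLabelURI } = await client.send(
    new DescribeReturnShippingLabelCommand({ JobId })
  );
  console.log("Return shipping label status:", Status);
  return ReturnShippingLabelURI; // pre-signed Amazon S3 URI added in this change
}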
@@ -2029,7 +2077,7 @@ export namespace GetSnowballUsageResult {

 export interface GetSoftwareUpdatesRequest {
   /**
    * The ID for a job that you want to get the software update file for, for example
-   * JID123e4567-e89b-12d3-a456-426655440000.
+   * JID123e4567-e89b-12d3-a456-426655440000.
    */
   JobId: string | undefined;
 }
@@ -2046,8 +2094,9 @@ export namespace GetSoftwareUpdatesRequest {

 export interface GetSoftwareUpdatesResult {
   /**
    * The Amazon S3 presigned URL for the update file associated with the specified
-   * JobId value. The software update will be available for 2 days after this request is made.
-   * To access an update after the 2 days have passed, you'll have to make another call to GetSoftwareUpdates.
+   * JobId value. The software update will be available for 2 days after this
+   * request is made. To access an update after the 2 days have passed, you'll have to make another
+   * call to GetSoftwareUpdates.
    */
   UpdatesURI?: string;
 }
@@ -2107,11 +2156,11 @@ export interface JobListEntry {
   JobState?: JobState | string;

   /**
-   * A value that indicates that this job is a main job. A main job represents a
-   * successful request to create an export job. Main jobs aren't associated with any Snowballs.
-   * Instead, each main job will have at least one job part, and each job part is associated with
-   * a Snowball. It might take some time before the job parts associated with a particular main
-   * job are listed, because they are created after the main job is created.
+   * A value that indicates that this job is a main job. A main job represents a successful
+   * request to create an export job. Main jobs aren't associated with any Snowballs. Instead, each
+   * main job will have at least one job part, and each job part is associated with a Snowball. It
+   * might take some time before the job parts associated with a particular main job are listed,
+   * because they are created after the main job is created.
    */
   IsMaster?: boolean;
@@ -2383,7 +2432,7 @@ export interface ListLongTermPricingRequest {
   /**
    * Because HTTP requests are stateless, this is the starting point for your next list of
-   * ListLongTermPricing to return.
+   * ListLongTermPricing to return.
    */
   NextToken?: string;
 }
@@ -2398,7 +2447,8 @@ export namespace ListLongTermPricingRequest {
 }

 /**
- * Each LongTermPricingListEntry object contains information about a long-term pricing type.
+ * Each LongTermPricingListEntry object contains information about a long-term
+ * pricing type.
  */
 export interface LongTermPricingListEntry {
   /**
@@ -2443,7 +2493,7 @@ export interface LongTermPricingListEntry {
   LongTermPricingStatus?: string;

   /**
-   * The type of AWS Snow Family device associated with this long-term pricing job.
+   * The type of Snow Family Devices associated with this long-term pricing job.
    */
   SnowballType?: SnowballType | string;
@@ -2494,8 +2544,7 @@ export interface UpdateClusterRequest {
   /**
    * The new role Amazon Resource Name (ARN) that you want to associate with this cluster.
-   * To create a role ARN, use the CreateRole API action in AWS
-   * Identity and Access Management (IAM).
+   * To create a role ARN, use the CreateRole API action in Identity and Access Management (IAM).
    */
   RoleARN?: string;
@@ -2511,8 +2560,9 @@ export interface UpdateClusterRequest {
   Resources?: JobResource;

   /**
-   * Specifies the service or services on the Snow Family device that your
-   * transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System).
+   * Specifies the service or services on the Snow Family device that your transferred data
+   * will be exported from or imported into. Amazon Web Services Snow Family device clusters support Amazon S3 and NFS
+   * (Network File System).
    */
   OnDeviceServiceConfiguration?: OnDeviceServiceConfiguration;
@@ -2568,8 +2618,8 @@ export interface UpdateJobRequest {
   /**
    * The new role Amazon Resource Name (ARN) that you want to associate with this job. To
-   * create a role ARN, use the CreateRoleAWS Identity and Access
-   * Management (IAM) API action.
+   * create a role ARN, use the CreateRoleIdentity and Access Management
+   * (IAM) API action.
    */
   RoleARN?: string;
@@ -2584,8 +2634,9 @@ export interface UpdateJobRequest {
   Resources?: JobResource;

   /**
-   * Specifies the service or services on the Snow Family device that your
-   * transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System).
+   * Specifies the service or services on the Snow Family device that your transferred data
+   * will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File
+   * System) and the Amazon Web Services Storage Gateway service Tape Gateway type.
    */
   OnDeviceServiceConfiguration?: OnDeviceServiceConfiguration;
@@ -2652,14 +2703,14 @@ export enum ShipmentState {

 export interface UpdateJobShipmentStateRequest {
   /**
    * The job ID of the job whose shipment date you want to update, for example
-   * JID123e4567-e89b-12d3-a456-426655440000.
+   * JID123e4567-e89b-12d3-a456-426655440000.
    */
   JobId: string | undefined;

   /**
    * The state of a device when it is being shipped.
    * Set to RECEIVED when the device arrives at your location.
-   * Set to RETURNED when you have returned the device to AWS.
+   * Set to RETURNED when you have returned the device to Amazon Web Services.
    */
   ShipmentState: ShipmentState | string | undefined;
 }
@@ -2691,7 +2742,8 @@ export interface UpdateLongTermPricingRequest {
   LongTermPricingId: string | undefined;

   /**
-   * Specifies that a device that is ordered with long-term pricing should be replaced with a new device.
+   * Specifies that a device that is ordered with long-term pricing should be replaced with a
+   * new device.
    */
   ReplacementJob?: string;

diff --git a/clients/client-snowball/src/protocols/Aws_json1_1.ts b/clients/client-snowball/src/protocols/Aws_json1_1.ts
index f00e2fd16ade..a05a2214c58d 100644
--- a/clients/client-snowball/src/protocols/Aws_json1_1.ts
+++ b/clients/client-snowball/src/protocols/Aws_json1_1.ts
@@ -142,6 +142,7 @@ import {
   SnowconeDeviceConfiguration,
   TargetOnDeviceService,
   TaxDocuments,
+  TGWOnDeviceServiceConfiguration,
   UnsupportedAddressException,
   UpdateClusterRequest,
   UpdateClusterResult,
@@ -2644,6 +2645,10 @@ const serializeAws_json1_1OnDeviceServiceConfiguration = (
       input.NFSOnDeviceService !== null && {
         NFSOnDeviceService: serializeAws_json1_1NFSOnDeviceServiceConfiguration(input.NFSOnDeviceService, context),
       }),
+    ...(input.TGWOnDeviceService !== undefined &&
+      input.TGWOnDeviceService !== null && {
+        TGWOnDeviceService: serializeAws_json1_1TGWOnDeviceServiceConfiguration(input.TGWOnDeviceService, context),
+      }),
   };
 };
@@ -2711,6 +2716,16 @@ const serializeAws_json1_1TaxDocuments = (input: TaxDocuments, context: __SerdeC
   };
 };

+const serializeAws_json1_1TGWOnDeviceServiceConfiguration = (
+  input: TGWOnDeviceServiceConfiguration,
+  context: __SerdeContext
+): any => {
+  return {
+    ...(input.StorageLimit !== undefined && input.StorageLimit !== null && { StorageLimit: input.StorageLimit }),
+    ...(input.StorageUnit !== undefined && input.StorageUnit !== null && { StorageUnit: input.StorageUnit }),
+  };
+};
+
 const serializeAws_json1_1UpdateClusterRequest = (input: UpdateClusterRequest, context: __SerdeContext): any => {
   return {
     ...(input.AddressId !== undefined && input.AddressId !== null && { AddressId: input.AddressId }),
@@ -3019,6 +3034,7 @@ const deserializeAws_json1_1DescribeReturnShippingLabelResult = (
       output.ExpirationDate !== undefined && output.ExpirationDate !== null
         ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.ExpirationDate)))
         : undefined,
+    ReturnShippingLabelURI: __expectString(output.ReturnShippingLabelURI),
     Status: __expectString(output.Status),
   } as any;
 };
@@ -3461,6 +3477,10 @@ const deserializeAws_json1_1OnDeviceServiceConfiguration = (
       output.NFSOnDeviceService !== undefined && output.NFSOnDeviceService !== null
         ? deserializeAws_json1_1NFSOnDeviceServiceConfiguration(output.NFSOnDeviceService, context)
         : undefined,
+    TGWOnDeviceService:
+      output.TGWOnDeviceService !== undefined && output.TGWOnDeviceService !== null
+        ? deserializeAws_json1_1TGWOnDeviceServiceConfiguration(output.TGWOnDeviceService, context)
+        : undefined,
   } as any;
 };
@@ -3561,6 +3581,16 @@ const deserializeAws_json1_1TaxDocuments = (output: any, context: __SerdeContext
   } as any;
 };

+const deserializeAws_json1_1TGWOnDeviceServiceConfiguration = (
+  output: any,
+  context: __SerdeContext
+): TGWOnDeviceServiceConfiguration => {
+  return {
+    StorageLimit: __expectInt32(output.StorageLimit),
+    StorageUnit: __expectString(output.StorageUnit),
+  } as any;
+};
+
 const deserializeAws_json1_1UnsupportedAddressException = (
   output: any,
   context: __SerdeContext
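The protocol changes above wire up the new TGWOnDeviceService member of OnDeviceServiceConfiguration (StorageLimit and StorageUnit), matching the Tape Gateway wording added to UpdateJobRequest. A rough sketch of passing it through the generated client, assuming @aws-sdk/client-snowball and placeholder job and storage values:

// Sketch only: job ID and storage values are placeholders, not taken from the diff.
import { SnowballClient, UpdateJobCommand } from "@aws-sdk/client-snowball";

const snowball = new SnowballClient({ region: "us-west-2" });

async function enableTapeGateway(): Promise<void> {
  await snowball.send(
    new UpdateJobCommand({
      JobId: "JID123e4567-e89b-12d3-a456-426655440000",
      OnDeviceServiceConfiguration: {
        // Tape Gateway (TGW) on-device service; serialized by
        // serializeAws_json1_1TGWOnDeviceServiceConfiguration above.
        TGWOnDeviceService: {
          StorageLimit: 80,
          StorageUnit: "TB", // assumed unit string
        },
      },
    })
  );
}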

diff --git a/clients/client-ssm/README.md b/clients/client-ssm/README.md
index 42fd9f1d775d..247ba0e26cf2 100644
--- a/clients/client-ssm/README.md
+++ b/clients/client-ssm/README.md
@@ -10,13 +10,19 @@ AWS SDK for JavaScript SSM Client for Node.js, Browser and React Native.

 Amazon Web Services Systems Manager is a collection of capabilities that helps you automate management tasks such as
 collecting system inventory, applying operating system (OS) patches, automating the creation of
 Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale.
-Systems Manager lets you remotely and securely manage the configuration of your managed instances. A
-managed instance is any Amazon Elastic Compute Cloud instance (EC2 instance), or any
-on-premises server or virtual machine (VM) in your hybrid environment that has been configured
-for Systems Manager.
+Systems Manager lets you remotely and securely manage the configuration of your managed nodes. A
+managed node is any Amazon Elastic Compute Cloud (Amazon EC2) instance, edge
+device, or on-premises server or virtual machine (VM) that has been configured for
+Systems Manager.
+
+With support for IoT Greengrass Version 2 devices, the phrase managed
+instance has been changed to managed node in most of the Systems Manager
+documentation. The Systems Manager console, API
+calls, error messages, and SSM documents still use the term instance.

 This reference is intended to be used with the Amazon Web Services Systems Manager User Guide.

-To get started, verify prerequisites and configure managed instances. For more information,
-see Setting up
+To get started, verify prerequisites and configure managed nodes. For more information, see
+Setting up
 Amazon Web Services Systems Manager in the Amazon Web Services Systems Manager User Guide.

 Related resources

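To make the "managed node" terminology above concrete, here is a minimal sketch that lists managed nodes (EC2 instances, edge devices, and hybrid servers or VMs registered with Systems Manager) using the generated client; it assumes @aws-sdk/client-ssm and configured credentials:

// Sketch only: region and page size are illustrative.
import { SSMClient, DescribeInstanceInformationCommand } from "@aws-sdk/client-ssm";

const ssm = new SSMClient({ region: "us-east-1" });

async function listManagedNodes(): Promise<void> {
  const { InstanceInformationList } = await ssm.send(
    new DescribeInstanceInformationCommand({ MaxResults: 50 })
  );
  for (const node of InstanceInformationList ?? []) {
    // Per the describeInstanceInformation docs below, IamRole is returned for
    // on-premises managed nodes but not for EC2 instances.
    console.log(node.InstanceId, node.PingStatus, node.PlatformType, node.IamRole ?? "-");
  }
}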
                                                      Amazon Web Services Systems Manager is a collection of capabilities that helps you automate management tasks such as * collecting system inventory, applying operating system (OS) patches, automating the creation of * Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale. - * Systems Manager lets you remotely and securely manage the configuration of your managed instances. A - * managed instance is any Amazon Elastic Compute Cloud instance (EC2 instance), or any - * on-premises server or virtual machine (VM) in your hybrid environment that has been configured - * for Systems Manager.

                                                      + * Systems Manager lets you remotely and securely manage the configuration of your managed nodes. A + * managed node is any Amazon Elastic Compute Cloud (Amazon EC2) instance, edge + * device, or on-premises server or virtual machine (VM) that has been configured for + * Systems Manager.

                                                      + * + *

                                                      With support for IoT Greengrass Version 2 devices, the phrase managed + * instance has been changed to managed node in most of the Systems Manager + * documentation. The Systems Manager console, API + * calls, error messages, and SSM documents still use the term instance.

                                                      + *
                                                      *

                                                      This reference is intended to be used with the Amazon Web Services Systems Manager User Guide.

                                                      - *

                                                      To get started, verify prerequisites and configure managed instances. For more information, - * see Setting up + *

                                                      To get started, verify prerequisites and configure managed nodes. For more information, see + * Setting up * Amazon Web Services Systems Manager in the Amazon Web Services Systems Manager User Guide.

                                                      *

                                                      * Related resources @@ -702,11 +708,11 @@ import { SSMClient } from "./SSMClient"; export class SSM extends SSMClient { /** *

                                                      Adds or overwrites one or more tags for the specified resource. Tags are metadata that you - * can assign to your documents, managed instances, maintenance windows, Parameter Store parameters, - * and patch baselines. Tags enable you to categorize your resources in different ways, for example, - * by purpose, owner, or environment. Each tag consists of a key and an optional value, both of - * which you define. For example, you could define a set of tags for your account's managed - * instances that helps you track each instance's owner and stack level. For example:

                                                      + * can assign to your documents, managed nodes, maintenance windows, Parameter Store parameters, and + * patch baselines. Tags enable you to categorize your resources in different ways, for example, by + * purpose, owner, or environment. Each tag consists of a key and an optional value, both of which + * you define. For example, you could define a set of tags for your account's managed nodes that + * helps you track each node's owner and stack level. For example:

                                                      *
                                                        *
                                                      • *

                                                        @@ -879,14 +885,14 @@ export class SSM extends SSMClient { /** *

                                                        Generates an activation code and activation ID you can use to register your on-premises - * server or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it - * possible to manage them using Systems Manager capabilities. You use the activation code and ID when - * installing SSM Agent on machines in your hybrid environment. For more information about - * requirements for managing on-premises instances and VMs using Systems Manager, see Setting up + * servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with + * Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and + * ID when installing SSM Agent on machines in your hybrid environment. For more information about + * requirements for managing on-premises machines using Systems Manager, see Setting up * Amazon Web Services Systems Manager for hybrid environments in the Amazon Web Services Systems Manager User Guide.

                                                        * - *

                                                        On-premises servers or VMs that are registered with Systems Manager and Amazon Elastic Compute Cloud (Amazon EC2) instances - * that you manage with Systems Manager are all called managed instances.

                                                        + *

                                                        Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed + * nodes.

                                                        *
                                                        */ public createActivation( @@ -919,16 +925,16 @@ export class SSM extends SSMClient { } /** - *

                                                        A State Manager association defines the state that you want to maintain on your instances. - * For example, an association can specify that anti-virus software must be installed and running on - * your instances, or that certain ports must be closed. For static targets, the association - * specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an - * Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager - * applies the configuration when new instances are added to the group. The association also - * specifies actions to take when applying the configuration. For example, an association for - * anti-virus software might run once a day. If the software isn't installed, then State Manager - * installs it. If the software is installed, but the service isn't running, then the association - * might instruct State Manager to start the service.

                                                        + *

                                                        A State Manager association defines the state that you want to maintain on your managed + * nodes. For example, an association can specify that anti-virus software must be installed and + * running on your managed nodes, or that certain ports must be closed. For static targets, the + * association specifies a schedule for when the configuration is reapplied. For dynamic targets, + * such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of + * Amazon Web Services Systems Manager applies the configuration when new managed nodes are added to the group. The + * association also specifies actions to take when applying the configuration. For example, an + * association for anti-virus software might run once a day. If the software isn't installed, then + * State Manager installs it. If the software is installed, but the service isn't running, then the + * association might instruct State Manager to start the service.

                                                        */ public createAssociation( args: CreateAssociationCommandInput, @@ -960,12 +966,12 @@ export class SSM extends SSMClient { } /** - *

                                                        Associates the specified Amazon Web Services Systems Manager document (SSM document) with the specified instances or - * targets.

                                                        - *

                                                        When you associate a document with one or more instances using instance IDs or tags, - * Amazon Web Services Systems Manager Agent (SSM Agent) running on the instance processes the document and configures the - * instance as specified.

                                                        - *

                                                        If you associate a document with an instance that already has an associated document, the + *

                                                        Associates the specified Amazon Web Services Systems Manager document (SSM document) with the specified managed nodes + * or targets.

                                                        + *

                                                        When you associate a document with one or more managed nodes using IDs or tags, + * Amazon Web Services Systems Manager Agent (SSM Agent) running on the managed node processes the document and configures the + * node as specified.

                                                        + *

                                                        If you associate a document with a managed node that already has an associated document, the * system returns the AssociationAlreadyExists exception.

                                                        */ public createAssociationBatch( @@ -999,7 +1005,7 @@ export class SSM extends SSMClient { /** *

                                                        Creates a Amazon Web Services Systems Manager (SSM document). An SSM document defines the actions that Systems Manager performs - * on your managed instances. For more information about SSM documents, including information about + * on your managed nodes. For more information about SSM documents, including information about * supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the * Amazon Web Services Systems Manager User Guide.

                                                        */ @@ -1232,9 +1238,8 @@ export class SSM extends SSMClient { /** *

                                                        Deletes an activation. You aren't required to delete an activation. If you delete an - * activation, you can no longer use it to register additional managed instances. Deleting an - * activation doesn't de-register managed instances. You must manually de-register managed - * instances.

                                                        + * activation, you can no longer use it to register additional managed nodes. Deleting an activation + * doesn't de-register managed nodes. You must manually de-register managed nodes.

                                                        */ public deleteActivation( args: DeleteActivationCommandInput, @@ -1266,13 +1271,13 @@ export class SSM extends SSMClient { } /** - *

                                                        Disassociates the specified Amazon Web Services Systems Manager document (SSM document) from the specified instance. + *

                                                        Disassociates the specified Amazon Web Services Systems Manager document (SSM document) from the specified managed node. * If you created the association by using the Targets parameter, then you must delete * the association by using the association ID.

                                                        - *

                                                        When you disassociate a document from an instance, it doesn't change the configuration of - * the instance. To change the configuration state of an instance after you disassociate a document, + *

                                                        When you disassociate a document from a managed node, it doesn't change the configuration of + * the node. To change the configuration state of a managed node after you disassociate a document, * you must create a new document with the desired configuration and associate it with the - * instance.

                                                        + * node.

                                                        */ public deleteAssociation( args: DeleteAssociationCommandInput, @@ -1304,9 +1309,9 @@ export class SSM extends SSMClient { } /** - *

                                                        Deletes the Amazon Web Services Systems Manager document (SSM document) and all instance associations to the + *

                                                        Deletes the Amazon Web Services Systems Manager document (SSM document) and all managed node associations to the * document.

                                                        - *

                                                        Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.

                                                        + *

                                                        Before you delete the document, we recommend that you use DeleteAssociation to disassociate all managed nodes that are associated with the document.

                                                        */ public deleteDocument( args: DeleteDocumentCommandInput, @@ -1534,8 +1539,8 @@ export class SSM extends SSMClient { /** *

                                                        Deletes a resource data sync configuration. After the configuration is deleted, changes to - * data on managed instances are no longer synced to or from the target. Deleting a sync - * configuration doesn't delete data.

                                                        + * data on managed nodes are no longer synced to or from the target. Deleting a sync configuration + * doesn't delete data.

                                                        */ public deleteResourceDataSync( args: DeleteResourceDataSyncCommandInput, @@ -1568,7 +1573,7 @@ export class SSM extends SSMClient { /** *

                                                        Removes the server or virtual machine from the list of registered servers. You can - * reregister the instance again at any time. If you don't plan to use Run Command on the server, we + * reregister the node again at any time. If you don't plan to use Run Command on the server, we * suggest uninstalling SSM Agent first.

                                                        */ public deregisterManagedInstance( @@ -1701,7 +1706,7 @@ export class SSM extends SSMClient { /** *

                                                        Describes details about the activation, such as the date and time the activation was * created, its expiration date, the Identity and Access Management (IAM) role assigned to - * the instances in the activation, and the number of instances registered by using this + * the managed nodes in the activation, and the number of nodes registered by using this * activation.

                                                        */ public describeActivations( @@ -1734,7 +1739,7 @@ export class SSM extends SSMClient { } /** - *

                                                        Describes the association for the specified target or instance. If you created the + *

                                                        Describes the association for the specified target or managed node. If you created the * association by using the Targets parameter, then you must retrieve the association * by using the association ID.

                                                        */ @@ -1995,7 +2000,7 @@ export class SSM extends SSMClient { } /** - *

                                                        All associations for the instance(s).

                                                        + *

                                                        All associations for the managed node(s).

                                                        */ public describeEffectiveInstanceAssociations( args: DescribeEffectiveInstanceAssociationsCommandInput, @@ -2064,7 +2069,7 @@ export class SSM extends SSMClient { } /** - *

                                                        The status of the associations for the instance(s).

                                                        + *

                                                        The status of the associations for the managed node(s).

                                                        */ public describeInstanceAssociationsStatus( args: DescribeInstanceAssociationsStatusCommandInput, @@ -2096,14 +2101,15 @@ export class SSM extends SSMClient { } /** - *

                                                        Describes one or more of your instances, including information about the operating system - * platform, the version of SSM Agent installed on the instance, instance status, and so on.

                                                        - *

                                                        If you specify one or more instance IDs, it returns information for those instances. If you - * don't specify instance IDs, it returns information for all your instances. If you specify an - * instance ID that isn't valid or an instance that you don't own, you receive an error.

                                                        + *

                                                        Describes one or more of your managed nodes, including information about the operating + * system platform, the version of SSM Agent installed on the managed node, node status, and so + * on.

                                                        + *

                                                        If you specify one or more managed node IDs, it returns information for those managed nodes. If + * you don't specify node IDs, it returns information for all your managed nodes. If you specify + * a node ID that isn't valid or a node that you don't own, you receive an error.

                                                        * *

                                                        The IamRole field for this API operation is the Identity and Access Management - * (IAM) role assigned to on-premises instances. This call doesn't return the + * (IAM) role assigned to on-premises managed nodes. This call doesn't return the * IAM role for EC2 instances.

                                                        *
                                                        */ @@ -2137,8 +2143,8 @@ export class SSM extends SSMClient { } /** - *

                                                        Retrieves information about the patches on the specified instance and their state relative - * to the patch baseline being used for the instance.

                                                        + *

                                                        Retrieves information about the patches on the specified managed node and their state relative + * to the patch baseline being used for the node.

                                                        */ public describeInstancePatches( args: DescribeInstancePatchesCommandInput, @@ -2170,7 +2176,7 @@ export class SSM extends SSMClient { } /** - *

                                                        Retrieves the high-level patch state of one or more instances.

                                                        + *

                                                        Retrieves the high-level patch state of one or more managed nodes.

                                                        */ public describeInstancePatchStates( args: DescribeInstancePatchStatesCommandInput, @@ -2202,7 +2208,7 @@ export class SSM extends SSMClient { } /** - *

                                                        Retrieves the high-level patch state for the instances in the specified patch group.

                                                        + *

                                                        Retrieves the high-level patch state for the managed nodes in the specified patch group.

                                                        */ public describeInstancePatchStatesForPatchGroup( args: DescribeInstancePatchStatesForPatchGroupCommandInput, @@ -2435,7 +2441,7 @@ export class SSM extends SSMClient { } /** - *

                                                        Retrieves information about the maintenance window targets or tasks that an instance is + *

                                                        Retrieves information about the maintenance window targets or tasks that a managed node is * associated with.

                                                        */ public describeMaintenanceWindowsForTarget( @@ -2952,8 +2958,8 @@ export class SSM extends SSMClient { *

                                                        Returns detailed information about command execution for an invocation or plugin.

                                                        *

                                                        * GetCommandInvocation only gives the execution status of a plugin in a document. - * To get the command execution status on a specific instance, use ListCommandInvocations. To get the command execution status across instances, use - * ListCommands.

                                                        + * To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes, + * use ListCommands.

                                                        */ public getCommandInvocation( args: GetCommandInvocationCommandInput, @@ -2985,7 +2991,7 @@ export class SSM extends SSMClient { } /** - *

@@ -2985,7 +2991,7 @@ export class SSM extends SSMClient {
   }

   /**
-   * Retrieves the Session Manager connection status for an instance to determine whether it is running and
+   * Retrieves the Session Manager connection status for a managed node to determine whether it is running and
    *    ready to receive Session Manager connections.
    */
   public getConnectionStatus(
@@ -3053,12 +3059,12 @@ export class SSM extends SSMClient {
   }

   /**
-   * Retrieves the current snapshot for the patch baseline the instance uses. This API is
+   * Retrieves the current snapshot for the patch baseline the managed node uses. This API is
    *    primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document).
    *
    * If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid
    *    this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of
-   * Amazon Web Services Systems Manager, with an SSM document that enables you to target an instance with a script or command.
+   * Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command.
    *    For example, run the command using the AWS-RunShellScript document or the
    *    AWS-RunPowerShellScript document.
    *
@@ -3121,7 +3127,7 @@ export class SSM extends SSMClient {
   }

   /**
-   * Query inventory information. This includes instance status, such as Stopped or
+   * Query inventory information. This includes managed node status, such as Stopped or
    *    Terminated.
    */
   public getInventory(
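A minimal sketch of the inventory query described above, using the aggregated client; the region and the filter key/value are illustrative assumptions:

```ts
import { SSM } from "@aws-sdk/client-ssm";

const ssm = new SSM({ region: "us-east-1" }); // placeholder region

async function listLinuxInventory() {
  const inventory = await ssm.getInventory({
    Filters: [
      { Key: "AWS:InstanceInformation.PlatformType", Values: ["Linux"], Type: "Equal" },
    ],
  });
  for (const entity of inventory.Entities ?? []) {
    // Each entity carries the inventory data keyed by type name.
    console.log(entity.Id, entity.Data?.["AWS:InstanceInformation"]?.Content);
  }
}
```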

@@ -3774,7 +3780,7 @@ export class SSM extends SSMClient {
   /**
    * Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You
-   * can limit the results to a specific State Manager association document or instance by specifying
+   * can limit the results to a specific State Manager association document or managed node by specifying
    *    a filter. State Manager is a capability of Amazon Web Services Systems Manager.
    */
   public listAssociations(
@@ -3839,10 +3845,10 @@ export class SSM extends SSMClient {
   }

   /**
-   * An invocation is copy of a command sent to a specific instance. A command can apply to one
-   * or more instances. A command invocation applies to one instance. For example, if a user runs
-   * SendCommand against three instances, then a command invocation is created for each
-   * requested instance ID. ListCommandInvocations provide status about command
+   * An invocation is a copy of a command sent to a specific managed node. A command can apply to one
+   * or more managed nodes. A command invocation applies to one managed node. For example, if a user runs
+   * SendCommand against three managed nodes, then a command invocation is created for
+   * each requested managed node ID. ListCommandInvocations provides status about command
    *    execution.
    */
   public listCommandInvocations(
@@ -4357,7 +4363,7 @@ export class SSM extends SSMClient {
    *    - ExecutionTime. The time the patch, association, or custom compliance item was applied to
-   *      the instance.
+   *      the managed node.
    *    - Id: The patch, association, or custom compliance ID.

@@ -4429,7 +4435,7 @@ export class SSM extends SSMClient {
   }

   /**
-   * Bulk update custom inventory items on one more instance. The request adds an inventory item,
+   * Bulk update custom inventory items on one or more managed nodes. The request adds an inventory item,
    *    if it doesn't already exist, or updates an inventory item, if it does exist.
    */
   public putInventory(
@@ -4698,7 +4704,7 @@ export class SSM extends SSMClient {
   }

   /**
-   * Reconnects a session to an instance after it has been disconnected. Connections can be
+   * Reconnects a session to a managed node after it has been disconnected. Connections can be
    *    resumed for disconnected sessions, but not terminated sessions.
    *
    * This command is primarily for use by client machines to automatically reconnect during
@@ -4768,7 +4774,7 @@ export class SSM extends SSMClient {
   }

   /**
-   * Runs commands on one or more managed instances.
+   * Runs commands on one or more managed nodes.
    */
  public sendCommand(args: SendCommandCommandInput, options?: __HttpHandlerOptions): Promise<SendCommandCommandOutput>;
  public sendCommand(args: SendCommandCommandInput, cb: (err: any, data?: SendCommandCommandOutput) => void): void;
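A minimal sketch of sending a command to managed nodes selected by tag rather than by ID; the region, tag key/value, and comment are placeholders, not values from this patch:

```ts
import { SSM } from "@aws-sdk/client-ssm";

const ssm = new SSM({ region: "us-east-1" }); // placeholder region

async function runShellCommand() {
  const result = await ssm.sendCommand({
    DocumentName: "AWS-RunShellScript",
    Targets: [{ Key: "tag:Environment", Values: ["staging"] }], // placeholder tag
    Parameters: { commands: ["uptime"] },
    Comment: "example invocation",
  });
  // The command ID is what you later pass to GetCommandInvocation / ListCommandInvocations.
  return result.Command?.CommandId;
}
```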

@@ -4893,7 +4899,7 @@ export class SSM extends SSMClient {
   }

   /**
-   * Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a
+   * Initiates a connection to a target (for example, a managed node) for a Session Manager session. Returns a
    *    URL and token that can be used to open a WebSocket connection for sending input and receiving
    *    outputs.
    *
@@ -4964,7 +4970,7 @@ export class SSM extends SSMClient {
   /**
    * Permanently ends a session and closes the data connection between the Session Manager client and
-   * SSM Agent on the instance. A terminated session isn't be resumed.
+   * SSM Agent on the managed node. A terminated session can't be resumed.
    */
   public terminateSession(
     args: TerminateSessionCommandInput,
@@ -5073,7 +5079,7 @@ export class SSM extends SSMClient {
   /**
    * Updates the status of the Amazon Web Services Systems Manager document (SSM document) associated with the specified
-   * instance.
+   * managed node.
    *
    * UpdateAssociationStatus is primarily used by the Amazon Web Services Systems Manager Agent (SSM Agent) to
    *    report status updates about your associations and is only used for associations created with the
@@ -5392,8 +5398,8 @@ export class SSM extends SSMClient {
   /**
    * Changes the Identity and Access Management (IAM) role that is assigned to the
-   * on-premises instance or virtual machines (VM). IAM roles are first assigned to
-   * these hybrid instances during the activation process. For more information, see CreateActivation.
+   * on-premises server, edge device, or virtual machine (VM). IAM roles are first
+   * assigned to these hybrid nodes during the activation process. For more information, see CreateActivation.
    */
  public updateManagedInstanceRole(
    args: UpdateManagedInstanceRoleCommandInput,

diff --git a/clients/client-ssm/src/SSMClient.ts b/clients/client-ssm/src/SSMClient.ts
@@ -882,13 +882,19 @@ export interface SSMClientResolvedConfig extends SSMClientResolvedConfigType {}
  * Amazon Web Services Systems Manager is a collection of capabilities that helps you automate management tasks such as
  * collecting system inventory, applying operating system (OS) patches, automating the creation of
  * Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale.
- * Systems Manager lets you remotely and securely manage the configuration of your managed instances. A
- * managed instance is any Amazon Elastic Compute Cloud instance (EC2 instance), or any
- * on-premises server or virtual machine (VM) in your hybrid environment that has been configured
- * for Systems Manager.
+ * Systems Manager lets you remotely and securely manage the configuration of your managed nodes. A
+ * managed node is any Amazon Elastic Compute Cloud (Amazon EC2) instance, edge
+ * device, or on-premises server or virtual machine (VM) that has been configured for
+ * Systems Manager.
+ *
+ * With support for IoT Greengrass Version 2 devices, the phrase managed
+ * instance has been changed to managed node in most of the Systems Manager
+ * documentation. The Systems Manager console, API
+ * calls, error messages, and SSM documents still use the term instance.
+ *
  *
  * This reference is intended to be used with the Amazon Web Services Systems Manager User Guide.
- * To get started, verify prerequisites and configure managed instances. For more information,
- * see Setting up
+ * To get started, verify prerequisites and configure managed nodes. For more information, see
+ * Setting up
  * Amazon Web Services Systems Manager in the Amazon Web Services Systems Manager User Guide.
  *
  * Related resources
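As background for the client documented here, a minimal sketch of constructing the bare-bones SSMClient and sending a command object; the region and MaxResults value are illustrative assumptions:

```ts
import { SSMClient, ListCommandsCommand } from "@aws-sdk/client-ssm";

// Credentials are resolved from the environment; the region is a placeholder.
const client = new SSMClient({ region: "us-east-1" });

async function listRecentCommands() {
  const response = await client.send(new ListCommandsCommand({ MaxResults: 10 }));
  for (const command of response.Commands ?? []) {
    console.log(command.CommandId, command.DocumentName, command.Status);
  }
}
```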

diff --git a/clients/client-ssm/src/commands/AddTagsToResourceCommand.ts b/clients/client-ssm/src/commands/AddTagsToResourceCommand.ts
@@ -23,11 +23,11 @@ export interface AddTagsToResourceCommandOutput extends AddTagsToResourceResult, __MetadataBearer {}
 /**
  * Adds or overwrites one or more tags for the specified resource. Tags are metadata that you
- * can assign to your documents, managed instances, maintenance windows, Parameter Store parameters,
- * and patch baselines. Tags enable you to categorize your resources in different ways, for example,
- * by purpose, owner, or environment. Each tag consists of a key and an optional value, both of
- * which you define. For example, you could define a set of tags for your account's managed
- * instances that helps you track each instance's owner and stack level. For example:
+ * can assign to your documents, managed nodes, maintenance windows, Parameter Store parameters, and
+ * patch baselines. Tags enable you to categorize your resources in different ways, for example, by
+ * purpose, owner, or environment. Each tag consists of a key and an optional value, both of which
+ * you define. For example, you could define a set of tags for your account's managed nodes that
+ * helps you track each node's owner and stack level. For example:
  *
  *
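A minimal sketch of tagging an on-premises managed node with the command above; the mi-1a2b3c4d5e6f ID reuses the example format from these docs, while the region and tag values are placeholders:

```ts
import { SSMClient, AddTagsToResourceCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function tagManagedNode() {
  await client.send(
    new AddTagsToResourceCommand({
      ResourceType: "ManagedInstance",
      ResourceId: "mi-1a2b3c4d5e6f", // example node ID
      Tags: [
        { Key: "Owner", Value: "DbAdmin" },
        { Key: "Stack", Value: "Production" },
      ],
    })
  );
}
```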

diff --git a/clients/client-ssm/src/commands/CreateActivationCommand.ts b/clients/client-ssm/src/commands/CreateActivationCommand.ts
@@ -23,14 +23,14 @@ export interface CreateActivationCommandOutput extends CreateActivationResult, __MetadataBearer {}
 /**
  * Generates an activation code and activation ID you can use to register your on-premises
- * server or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it
- * possible to manage them using Systems Manager capabilities. You use the activation code and ID when
- * installing SSM Agent on machines in your hybrid environment. For more information about
- * requirements for managing on-premises instances and VMs using Systems Manager, see Setting up
+ * servers, edge devices, or virtual machines (VMs) with Amazon Web Services Systems Manager. Registering these machines with
+ * Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and
+ * ID when installing SSM Agent on machines in your hybrid environment. For more information about
+ * requirements for managing on-premises machines using Systems Manager, see Setting up
  * Amazon Web Services Systems Manager for hybrid environments in the Amazon Web Services Systems Manager User Guide.
  *
- * On-premises servers or VMs that are registered with Systems Manager and Amazon Elastic Compute Cloud (Amazon EC2) instances
- * that you manage with Systems Manager are all called managed instances.
+ * Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured
+ * for Systems Manager are all called managed nodes.
  *
  * @example
  * Use a bare-bones client and the command you need to make an API call.
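A minimal sketch of requesting a hybrid activation as described above; the IAM role name, description, and limits are placeholders and the role must already exist in your account:

```ts
import { SSMClient, CreateActivationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function createHybridActivation() {
  const { ActivationId, ActivationCode } = await client.send(
    new CreateActivationCommand({
      IamRole: "SSMServiceRole", // placeholder role name
      Description: "Activation for on-premises servers",
      DefaultInstanceName: "on-prem-node",
      RegistrationLimit: 10,
    })
  );
  // The code and ID are supplied to the SSM Agent installer on each machine.
  return { ActivationId, ActivationCode };
}
```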

diff --git a/clients/client-ssm/src/commands/CreateAssociationBatchCommand.ts b/clients/client-ssm/src/commands/CreateAssociationBatchCommand.ts
@@ -22,12 +22,12 @@ export interface CreateAssociationBatchCommandInput extends CreateAssociationBatchRequest {}
 export interface CreateAssociationBatchCommandOutput extends CreateAssociationBatchResult, __MetadataBearer {}

 /**
- * Associates the specified Amazon Web Services Systems Manager document (SSM document) with the specified instances or
- * targets.
- * When you associate a document with one or more instances using instance IDs or tags,
- * Amazon Web Services Systems Manager Agent (SSM Agent) running on the instance processes the document and configures the
- * instance as specified.
- * If you associate a document with an instance that already has an associated document, the
+ * Associates the specified Amazon Web Services Systems Manager document (SSM document) with the specified managed nodes
+ * or targets.
+ * When you associate a document with one or more managed nodes using IDs or tags,
+ * Amazon Web Services Systems Manager Agent (SSM Agent) running on the managed node processes the document and configures the
+ * node as specified.
+ * If you associate a document with a managed node that already has an associated document, the
  * system returns the AssociationAlreadyExists exception.

 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/CreateAssociationCommand.ts b/clients/client-ssm/src/commands/CreateAssociationCommand.ts
@@ -22,16 +22,16 @@ export interface CreateAssociationCommandInput extends CreateAssociationRequest {}
 export interface CreateAssociationCommandOutput extends CreateAssociationResult, __MetadataBearer {}

 /**
- * A State Manager association defines the state that you want to maintain on your instances.
- * For example, an association can specify that anti-virus software must be installed and running on
- * your instances, or that certain ports must be closed. For static targets, the association
- * specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an
- * Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager
- * applies the configuration when new instances are added to the group. The association also
- * specifies actions to take when applying the configuration. For example, an association for
- * anti-virus software might run once a day. If the software isn't installed, then State Manager
- * installs it. If the software is installed, but the service isn't running, then the association
- * might instruct State Manager to start the service.
+ * A State Manager association defines the state that you want to maintain on your managed
+ * nodes. For example, an association can specify that anti-virus software must be installed and
+ * running on your managed nodes, or that certain ports must be closed. For static targets, the
+ * association specifies a schedule for when the configuration is reapplied. For dynamic targets,
+ * such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of
+ * Amazon Web Services Systems Manager applies the configuration when new managed nodes are added to the group. The
+ * association also specifies actions to take when applying the configuration. For example, an
+ * association for anti-virus software might run once a day. If the software isn't installed, then
+ * State Manager installs it. If the software is installed, but the service isn't running, then the
+ * association might instruct State Manager to start the service.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
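A minimal sketch of creating such an association with a tag-based dynamic target and a weekly schedule; the document name is the standard AWS-UpdateSSMAgent document, while the region, tag, and cron expression are illustrative assumptions:

```ts
import { SSMClient, CreateAssociationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function associateAgentUpdates() {
  const { AssociationDescription } = await client.send(
    new CreateAssociationCommand({
      Name: "AWS-UpdateSSMAgent",
      Targets: [{ Key: "tag:Environment", Values: ["staging"] }], // placeholder tag
      ScheduleExpression: "cron(0 2 ? * SUN *)", // every Sunday 02:00 UTC
    })
  );
  return AssociationDescription?.AssociationId;
}
```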

diff --git a/clients/client-ssm/src/commands/CreateDocumentCommand.ts b/clients/client-ssm/src/commands/CreateDocumentCommand.ts
@@ -23,7 +23,7 @@ export interface CreateDocumentCommandOutput extends CreateDocumentResult, __MetadataBearer {}
 /**
  * Creates an Amazon Web Services Systems Manager document (SSM document). An SSM document defines the actions that Systems Manager performs
- * on your managed instances. For more information about SSM documents, including information about
+ * on your managed nodes. For more information about SSM documents, including information about
  * supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the
  * Amazon Web Services Systems Manager User Guide.
  * @example

diff --git a/clients/client-ssm/src/commands/DeleteActivationCommand.ts b/clients/client-ssm/src/commands/DeleteActivationCommand.ts
@@ -23,9 +23,8 @@ export interface DeleteActivationCommandOutput extends DeleteActivationResult, __MetadataBearer {}
 /**
  * Deletes an activation. You aren't required to delete an activation. If you delete an
- * activation, you can no longer use it to register additional managed instances. Deleting an
- * activation doesn't de-register managed instances. You must manually de-register managed
- * instances.
+ * activation, you can no longer use it to register additional managed nodes. Deleting an activation
+ * doesn't de-register managed nodes. You must manually de-register managed nodes.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DeleteAssociationCommand.ts b/clients/client-ssm/src/commands/DeleteAssociationCommand.ts
@@ -22,13 +22,13 @@ export interface DeleteAssociationCommandInput extends DeleteAssociationRequest {}
 export interface DeleteAssociationCommandOutput extends DeleteAssociationResult, __MetadataBearer {}

 /**
- * Disassociates the specified Amazon Web Services Systems Manager document (SSM document) from the specified instance.
+ * Disassociates the specified Amazon Web Services Systems Manager document (SSM document) from the specified managed node.
  * If you created the association by using the Targets parameter, then you must delete
  * the association by using the association ID.
- * When you disassociate a document from an instance, it doesn't change the configuration of
- * the instance. To change the configuration state of an instance after you disassociate a document,
+ * When you disassociate a document from a managed node, it doesn't change the configuration of
+ * the node. To change the configuration state of a managed node after you disassociate a document,
  * you must create a new document with the desired configuration and associate it with the
- * instance.
+ * node.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DeleteDocumentCommand.ts b/clients/client-ssm/src/commands/DeleteDocumentCommand.ts
@@ -22,9 +22,9 @@ export interface DeleteDocumentCommandInput extends DeleteDocumentRequest {}
 export interface DeleteDocumentCommandOutput extends DeleteDocumentResult, __MetadataBearer {}

 /**
- * Deletes the Amazon Web Services Systems Manager document (SSM document) and all instance associations to the
+ * Deletes the Amazon Web Services Systems Manager document (SSM document) and all managed node associations to the
  * document.
- * Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document.
+ * Before you delete the document, we recommend that you use DeleteAssociation to disassociate all managed nodes that are associated with the document.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DeleteResourceDataSyncCommand.ts b/clients/client-ssm/src/commands/DeleteResourceDataSyncCommand.ts
@@ -23,8 +23,8 @@ export interface DeleteResourceDataSyncCommandOutput extends DeleteResourceDataSyncResult, __MetadataBearer {}
 /**
  * Deletes a resource data sync configuration. After the configuration is deleted, changes to
- * data on managed instances are no longer synced to or from the target. Deleting a sync
- * configuration doesn't delete data.
+ * data on managed nodes are no longer synced to or from the target. Deleting a sync configuration
+ * doesn't delete data.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DeregisterManagedInstanceCommand.ts b/clients/client-ssm/src/commands/DeregisterManagedInstanceCommand.ts
@@ -23,7 +23,7 @@ export interface DeregisterManagedInstanceCommandOutput extends DeregisterManagedInstanceResult, __MetadataBearer {}
 /**
  * Removes the server or virtual machine from the list of registered servers. You can
- * reregister the instance again at any time. If you don't plan to use Run Command on the server, we
+ * reregister the node again at any time. If you don't plan to use Run Command on the server, we
  * suggest uninstalling SSM Agent first.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DescribeActivationsCommand.ts b/clients/client-ssm/src/commands/DescribeActivationsCommand.ts
@@ -24,7 +24,7 @@ export interface DescribeActivationsCommandOutput extends DescribeActivationsResult, __MetadataBearer {}
 /**
  * Describes details about the activation, such as the date and time the activation was
  * created, its expiration date, the Identity and Access Management (IAM) role assigned to
- * the instances in the activation, and the number of instances registered by using this
+ * the managed nodes in the activation, and the number of nodes registered by using this
  * activation.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DescribeAssociationCommand.ts b/clients/client-ssm/src/commands/DescribeAssociationCommand.ts
@@ -22,7 +22,7 @@ export interface DescribeAssociationCommandOutput extends DescribeAssociationResult, __MetadataBearer {}
 /**
- * Describes the association for the specified target or instance. If you created the
+ * Describes the association for the specified target or managed node. If you created the
  * association by using the Targets parameter, then you must retrieve the association
  * by using the association ID.
 * @example

diff --git a/clients/client-ssm/src/commands/DescribeEffectiveInstanceAssociationsCommand.ts b/clients/client-ssm/src/commands/DescribeEffectiveInstanceAssociationsCommand.ts
@@ -28,7 +28,7 @@ export interface DescribeEffectiveInstanceAssociationsCommandOutput
   __MetadataBearer {}
 /**
- * All associations for the instance(s).
+ * All associations for the managed node(s).
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DescribeInstanceAssociationsStatusCommand.ts b/clients/client-ssm/src/commands/DescribeInstanceAssociationsStatusCommand.ts
@@ -27,7 +27,7 @@ export interface DescribeInstanceAssociationsStatusCommandOutput
   __MetadataBearer {}
 /**
- * The status of the associations for the instance(s).
+ * The status of the associations for the managed node(s).
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DescribeInstanceInformationCommand.ts b/clients/client-ssm/src/commands/DescribeInstanceInformationCommand.ts
@@ -22,14 +22,15 @@ export interface DescribeInstanceInformationCommandInput extends DescribeInstanceInformationRequest {}
 export interface DescribeInstanceInformationCommandOutput extends DescribeInstanceInformationResult, __MetadataBearer {}

 /**
- * Describes one or more of your instances, including information about the operating system
- * platform, the version of SSM Agent installed on the instance, instance status, and so on.
- * If you specify one or more instance IDs, it returns information for those instances. If you
- * don't specify instance IDs, it returns information for all your instances. If you specify an
- * instance ID that isn't valid or an instance that you don't own, you receive an error.
+ * Describes one or more of your managed nodes, including information about the operating
+ * system platform, the version of SSM Agent installed on the managed node, node status, and so
+ * on.
+ * If you specify one or more managed node IDs, it returns information for those managed nodes. If
+ * you don't specify node IDs, it returns information for all your managed nodes. If you specify
+ * a node ID that isn't valid or a node that you don't own, you receive an error.
  *
  * The IamRole field for this API operation is the Identity and Access Management
- * (IAM) role assigned to on-premises instances. This call doesn't return the
+ * (IAM) role assigned to on-premises managed nodes. This call doesn't return the
  * IAM role for EC2 instances.
  *
 * @example
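A minimal sketch of listing managed nodes with the operation above, filtered by platform; the region and filter values are illustrative assumptions:

```ts
import { SSMClient, DescribeInstanceInformationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function listLinuxNodes() {
  const { InstanceInformationList } = await client.send(
    new DescribeInstanceInformationCommand({
      Filters: [{ Key: "PlatformTypes", Values: ["Linux"] }],
    })
  );
  for (const node of InstanceInformationList ?? []) {
    console.log(node.InstanceId, node.PingStatus, node.AgentVersion);
  }
}
```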

diff --git a/clients/client-ssm/src/commands/DescribeInstancePatchStatesCommand.ts b/clients/client-ssm/src/commands/DescribeInstancePatchStatesCommand.ts
@@ -22,7 +22,7 @@ export interface DescribeInstancePatchStatesCommandOutput extends DescribeInstancePatchStatesResult, __MetadataBearer {}
 /**
- * Retrieves the high-level patch state of one or more instances.
+ * Retrieves the high-level patch state of one or more managed nodes.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
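A minimal sketch of fetching those patch states for two nodes; the region and node IDs (one EC2 instance, one on-premises node) are placeholders:

```ts
import { SSMClient, DescribeInstancePatchStatesCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function patchSummary() {
  const { InstancePatchStates } = await client.send(
    new DescribeInstancePatchStatesCommand({
      InstanceIds: ["i-0123456789abcdef0", "mi-1a2b3c4d5e6f"], // placeholder IDs
    })
  );
  for (const state of InstancePatchStates ?? []) {
    console.log(state.InstanceId, state.InstalledCount, state.MissingCount, state.FailedCount);
  }
}
```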

diff --git a/clients/client-ssm/src/commands/DescribeInstancePatchStatesForPatchGroupCommand.ts b/clients/client-ssm/src/commands/DescribeInstancePatchStatesForPatchGroupCommand.ts
@@ -28,7 +28,7 @@ export interface DescribeInstancePatchStatesForPatchGroupCommandOutput
   __MetadataBearer {}
 /**
- * Retrieves the high-level patch state for the instances in the specified patch group.
+ * Retrieves the high-level patch state for the managed nodes in the specified patch group.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DescribeInstancePatchesCommand.ts b/clients/client-ssm/src/commands/DescribeInstancePatchesCommand.ts
@@ -22,8 +22,8 @@ export interface DescribeInstancePatchesCommandOutput extends DescribeInstancePatchesResult, __MetadataBearer {}
 /**
- * Retrieves information about the patches on the specified instance and their state relative
- * to the patch baseline being used for the instance.
+ * Retrieves information about the patches on the specified managed node and their state relative
+ * to the patch baseline being used for the node.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/DescribeMaintenanceWindowsForTargetCommand.ts b/clients/client-ssm/src/commands/DescribeMaintenanceWindowsForTargetCommand.ts
@@ -27,7 +27,7 @@ export interface DescribeMaintenanceWindowsForTargetCommandOutput
   __MetadataBearer {}
 /**
- * Retrieves information about the maintenance window targets or tasks that an instance is
+ * Retrieves information about the maintenance window targets or tasks that a managed node is
  * associated with.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/GetCommandInvocationCommand.ts b/clients/client-ssm/src/commands/GetCommandInvocationCommand.ts
@@ -25,8 +25,8 @@ export interface GetCommandInvocationCommandOutput extends GetCommandInvocationResult, __MetadataBearer {}
  * Returns detailed information about command execution for an invocation or plugin.
  *
  * GetCommandInvocation only gives the execution status of a plugin in a document.
- * To get the command execution status on a specific instance, use ListCommandInvocations. To get the command execution status across instances, use
- * ListCommands.
+ * To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes,
+ * use ListCommands.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/GetConnectionStatusCommand.ts b/clients/client-ssm/src/commands/GetConnectionStatusCommand.ts
@@ -22,7 +22,7 @@ export interface GetConnectionStatusCommandOutput extends GetConnectionStatusResponse, __MetadataBearer {}
 /**
- * Retrieves the Session Manager connection status for an instance to determine whether it is running and
+ * Retrieves the Session Manager connection status for a managed node to determine whether it is running and
  * ready to receive Session Manager connections.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/GetDeployablePatchSnapshotForInstanceCommand.ts b/clients/client-ssm/src/commands/GetDeployablePatchSnapshotForInstanceCommand.ts
@@ -28,12 +28,12 @@ export interface GetDeployablePatchSnapshotForInstanceCommandOutput
   __MetadataBearer {}
 /**
- * Retrieves the current snapshot for the patch baseline the instance uses. This API is
+ * Retrieves the current snapshot for the patch baseline the managed node uses. This API is
  * primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document).
  *
  * If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid
  * this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of
- * Amazon Web Services Systems Manager, with an SSM document that enables you to target an instance with a script or command.
+ * Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command.
  * For example, run the command using the AWS-RunShellScript document or the
  * AWS-RunPowerShellScript document.
  *

diff --git a/clients/client-ssm/src/commands/GetInventoryCommand.ts b/clients/client-ssm/src/commands/GetInventoryCommand.ts
@@ -23,7 +23,7 @@ export interface GetInventoryCommandOutput extends GetInventoryResult, __MetadataBearer {}
 /**
- * Query inventory information. This includes instance status, such as Stopped or
+ * Query inventory information. This includes managed node status, such as Stopped or
  * Terminated.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/ListAssociationsCommand.ts b/clients/client-ssm/src/commands/ListAssociationsCommand.ts
@@ -23,7 +23,7 @@ export interface ListAssociationsCommandOutput extends ListAssociationsResult, __MetadataBearer {}
 /**
  * Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You
- * can limit the results to a specific State Manager association document or instance by specifying
+ * can limit the results to a specific State Manager association document or managed node by specifying
  * a filter. State Manager is a capability of Amazon Web Services Systems Manager.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/ListCommandInvocationsCommand.ts b/clients/client-ssm/src/commands/ListCommandInvocationsCommand.ts
@@ -22,10 +22,10 @@ export interface ListCommandInvocationsCommandOutput extends ListCommandInvocationsResult, __MetadataBearer {}
 /**
- * An invocation is copy of a command sent to a specific instance. A command can apply to one
- * or more instances. A command invocation applies to one instance. For example, if a user runs
- * SendCommand against three instances, then a command invocation is created for each
- * requested instance ID. ListCommandInvocations provide status about command
+ * An invocation is a copy of a command sent to a specific managed node. A command can apply to one
+ * or more managed nodes. A command invocation applies to one managed node. For example, if a user runs
+ * SendCommand against three managed nodes, then a command invocation is created for
+ * each requested managed node ID. ListCommandInvocations provides status about command
  * execution.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/PutComplianceItemsCommand.ts b/clients/client-ssm/src/commands/PutComplianceItemsCommand.ts
@@ -37,7 +37,7 @@ export interface PutComplianceItemsCommandOutput extends PutComplianceItemsResult, __MetadataBearer {}
  *    - ExecutionTime. The time the patch, association, or custom compliance item was applied to
- *      the instance.
+ *      the managed node.
  *    - Id: The patch, association, or custom compliance ID.
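A minimal sketch of reporting a custom compliance item with the fields listed above; the compliance type, item details, node ID, and region are illustrative assumptions:

```ts
import { SSMClient, PutComplianceItemsCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function reportCustomCompliance() {
  await client.send(
    new PutComplianceItemsCommand({
      ResourceId: "mi-1a2b3c4d5e6f", // placeholder node ID
      ResourceType: "ManagedInstance",
      ComplianceType: "Custom:AntivirusScan", // custom types must use the Custom: prefix
      ExecutionSummary: { ExecutionTime: new Date() },
      Items: [
        { Id: "scan-001", Title: "Signature database", Severity: "HIGH", Status: "COMPLIANT" },
      ],
    })
  );
}
```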

diff --git a/clients/client-ssm/src/commands/PutInventoryCommand.ts b/clients/client-ssm/src/commands/PutInventoryCommand.ts
@@ -22,7 +22,7 @@ export interface PutInventoryCommandOutput extends PutInventoryResult, __MetadataBearer {}
 /**
- * Bulk update custom inventory items on one more instance. The request adds an inventory item,
+ * Bulk update custom inventory items on one or more managed nodes. The request adds an inventory item,
  * if it doesn't already exist, or updates an inventory item, if it does exist.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
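A minimal sketch of adding a custom inventory item to one managed node; the inventory type name, schema version, content, node ID, and region are illustrative assumptions:

```ts
import { SSMClient, PutInventoryCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function putCustomInventory() {
  await client.send(
    new PutInventoryCommand({
      InstanceId: "mi-1a2b3c4d5e6f", // placeholder node ID
      Items: [
        {
          TypeName: "Custom:RackInfo", // custom types must use the Custom: prefix
          SchemaVersion: "1.0",
          CaptureTime: "2021-11-30T00:00:00Z",
          Content: [{ RackLocation: "Bay A, Row 12" }],
        },
      ],
    })
  );
}
```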

diff --git a/clients/client-ssm/src/commands/ResumeSessionCommand.ts b/clients/client-ssm/src/commands/ResumeSessionCommand.ts
@@ -22,7 +22,7 @@ export interface ResumeSessionCommandOutput extends ResumeSessionResponse, __MetadataBearer {}
 /**
- * Reconnects a session to an instance after it has been disconnected. Connections can be
+ * Reconnects a session to a managed node after it has been disconnected. Connections can be
  * resumed for disconnected sessions, but not terminated sessions.
  *
  * This command is primarily for use by client machines to automatically reconnect during

diff --git a/clients/client-ssm/src/commands/SendCommandCommand.ts b/clients/client-ssm/src/commands/SendCommandCommand.ts
@@ -22,7 +22,7 @@ export interface SendCommandCommandOutput extends SendCommandResult, __MetadataBearer {}
 /**
- * Runs commands on one or more managed instances.
+ * Runs commands on one or more managed nodes.
 * @example
 * Use a bare-bones client and the command you need to make an API call.

diff --git a/clients/client-ssm/src/commands/StartSessionCommand.ts b/clients/client-ssm/src/commands/StartSessionCommand.ts
@@ -22,7 +22,7 @@ export interface StartSessionCommandOutput extends StartSessionResponse, __MetadataBearer {}
 /**
- * Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a
+ * Initiates a connection to a target (for example, a managed node) for a Session Manager session. Returns a
  * URL and token that can be used to open a WebSocket connection for sending input and receiving
  * outputs.
  *

diff --git a/clients/client-ssm/src/commands/TerminateSessionCommand.ts b/clients/client-ssm/src/commands/TerminateSessionCommand.ts
@@ -23,7 +23,7 @@ export interface TerminateSessionCommandOutput extends TerminateSessionResponse, __MetadataBearer {}
 /**
  * Permanently ends a session and closes the data connection between the Session Manager client and
- * SSM Agent on the instance. A terminated session isn't be resumed.
+ * SSM Agent on the managed node. A terminated session can't be resumed.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
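A minimal sketch of starting and then terminating a Session Manager session against one managed node; the region and target ID are placeholders, and the returned stream URL and token are meant for a WebSocket client such as the Session Manager plugin rather than direct use here:

```ts
import { SSMClient, StartSessionCommand, TerminateSessionCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function startAndEndSession() {
  const { SessionId, StreamUrl, TokenValue } = await client.send(
    new StartSessionCommand({ Target: "i-0123456789abcdef0" }) // placeholder target
  );
  console.log(SessionId, StreamUrl, TokenValue ? "token received" : "no token");

  if (SessionId) {
    await client.send(new TerminateSessionCommand({ SessionId }));
  }
}
```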

diff --git a/clients/client-ssm/src/commands/UpdateAssociationStatusCommand.ts b/clients/client-ssm/src/commands/UpdateAssociationStatusCommand.ts
@@ -23,7 +23,7 @@ export interface UpdateAssociationStatusCommandOutput extends UpdateAssociationStatusResult, __MetadataBearer {}
 /**
  * Updates the status of the Amazon Web Services Systems Manager document (SSM document) associated with the specified
- * instance.
+ * managed node.
  *
  * UpdateAssociationStatus is primarily used by the Amazon Web Services Systems Manager Agent (SSM Agent) to
  * report status updates about your associations and is only used for associations created with the

diff --git a/clients/client-ssm/src/commands/UpdateDocumentMetadataCommand.ts b/clients/client-ssm/src/commands/UpdateDocumentMetadataCommand.ts
@@ -11,7 +11,8 @@ import {
   SerdeContext as __SerdeContext,
 } from "@aws-sdk/types";

-import { UpdateDocumentMetadataRequest, UpdateDocumentMetadataResponse } from "../models/models_1";
+import { UpdateDocumentMetadataRequest } from "../models/models_1";
+import { UpdateDocumentMetadataResponse } from "../models/models_2";
 import {
   deserializeAws_json1_1UpdateDocumentMetadataCommand,
   serializeAws_json1_1UpdateDocumentMetadataCommand,

diff --git a/clients/client-ssm/src/commands/UpdateManagedInstanceRoleCommand.ts b/clients/client-ssm/src/commands/UpdateManagedInstanceRoleCommand.ts
@@ -23,8 +23,8 @@ export interface UpdateManagedInstanceRoleCommandOutput extends UpdateManagedInstanceRoleResult, __MetadataBearer {}
 /**
  * Changes the Identity and Access Management (IAM) role that is assigned to the
- * on-premises instance or virtual machines (VM). IAM roles are first assigned to
- * these hybrid instances during the activation process. For more information, see CreateActivation.
+ * on-premises server, edge device, or virtual machine (VM). IAM roles are first
+ * assigned to these hybrid nodes during the activation process. For more information, see CreateActivation.
 * @example
 * Use a bare-bones client and the command you need to make an API call.
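A minimal sketch of swapping the IAM role on a hybrid node as described above; the node ID and role name are placeholders, and the role must already exist:

```ts
import { SSMClient, UpdateManagedInstanceRoleCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" }); // placeholder region

async function swapHybridNodeRole() {
  await client.send(
    new UpdateManagedInstanceRoleCommand({
      InstanceId: "mi-1a2b3c4d5e6f", // placeholder node ID
      IamRole: "SSMHybridNodeRole", // placeholder role name
    })
  );
}
```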

                                                          Metadata that you assign to your Amazon Web Services resources. Tags enable you to categorize your * resources in different ways, for example, by purpose, owner, or environment. In Amazon Web Services Systems Manager, you - * can apply tags to Systems Manager documents (SSM documents), managed instances, maintenance windows, + * can apply tags to Systems Manager documents (SSM documents), managed nodes, maintenance windows, * parameters, patch baselines, OpsItems, and OpsMetadata.

                                                          */ export interface Tag { @@ -56,7 +56,7 @@ export namespace Tag { /** *

  * An activation registers one or more on-premises servers or virtual machines (VMs) with Amazon Web Services
  * so that you can configure those servers or VMs using Run Command. A server or VM that has been
- * registered with Amazon Web Services Systems Manager is called a managed instance.
+ * registered with Amazon Web Services Systems Manager is called a managed node.
  */
 export interface Activation {
   /**
@@ -70,28 +70,28 @@ export interface Activation {
   Description?: string;

   /**
-  * A name for the managed instance when it is created.
+  * A name for the managed node when it is created.
   */
  DefaultInstanceName?: string;

  /**
   * The Identity and Access Management (IAM) role to assign to the managed
-  * instance.
+  * node.
   */
  IamRole?: string;

  /**
-  * The maximum number of managed instances that can be registered using this activation.
+  * The maximum number of managed nodes that can be registered using this activation.
   */
  RegistrationLimit?: number;

  /**
-  * The number of managed instances already registered with this activation.
+  * The number of managed nodes already registered with this activation.
   */
  RegistrationsCount?: number;

  /**
-  * The date when this activation can no longer be used to register managed instances.
+  * The date when this activation can no longer be used to register managed nodes.

   */
  ExpirationDate?: Date;
@@ -135,7 +135,7 @@ export interface AddTagsToResourceRequest {
   * Specifies the type of resource you are tagging.
   *
   * The ManagedInstance type for this API operation is for on-premises managed
-  * instances. You must specify the name of the managed instance in the following format:
+  * nodes. You must specify the name of the managed node in the following format:
   * mi-ID_number. For example, mi-1a2b3c4d5e6f.
@@ -167,7 +167,7 @@ export interface AddTagsToResourceRequest {
   *
   * The ManagedInstance type for this API operation is only for on-premises
-  * managed instances. You must specify the name of the managed instance in the following format:
+  * managed nodes. You must specify the name of the managed node in the following format:
   * mi-ID_number. For example, mi-1a2b3c4d5e6f.
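The mi- naming format described above is easiest to see in a call. A minimal sketch using the v3 client, with placeholder IDs and tag values:

```ts
import { SSMClient, AddTagsToResourceCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// Tag an on-premises managed node; note the "mi-" prefixed resource ID.
async function tagManagedNode() {
  await client.send(
    new AddTagsToResourceCommand({
      ResourceType: "ManagedInstance",
      ResourceId: "mi-1a2b3c4d5e6f",
      Tags: [{ Key: "Environment", Value: "Production" }],
    })
  );
}
```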

@@ -240,8 +240,8 @@ export namespace InvalidResourceId {
 }

 /**
- * The resource type isn't valid. For example, if you are attempting to tag an instance, the
- * instance must be a registered, managed instance.
+ * The resource type isn't valid. For example, if you are attempting to tag an EC2 instance, the
+ * instance must be a registered managed node.

  */
 export interface InvalidResourceType extends __SmithyException, $MetadataBearer {
   name: "InvalidResourceType";
@@ -456,8 +456,8 @@ export interface CancelCommandRequest {
   CommandId: string | undefined;

   /**
-  * (Optional) A list of instance IDs on which you want to cancel the command. If not provided,
-  * the command is canceled on every instance on which it was requested.
+  * (Optional) A list of managed node IDs on which you want to cancel the command. If not provided,
+  * the command is canceled on every node on which it was requested.
   */
  InstanceIds?: string[];
 }
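To make the renamed parameter concrete, a minimal sketch of cancelling a Run Command invocation on specific managed nodes; the command ID and node IDs are placeholders:

```ts
import { SSMClient, CancelCommandCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// Cancel a Run Command invocation on two managed nodes only.
async function cancelOnNodes() {
  await client.send(
    new CancelCommandCommand({
      CommandId: "0831e1a8-example-command-id",
      InstanceIds: ["i-0123456789abcdef0", "mi-1a2b3c4d5e6f"],
    })
  );
}
```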

@@ -487,7 +487,7 @@ export namespace CancelCommandResult {
 }

 /**
- * You can't specify an instance ID in more than one association.
+ * You can't specify a managed node ID in more than one association.
  */
 export interface DuplicateInstanceId extends __SmithyException, $MetadataBearer {
   name: "DuplicateInstanceId";
@@ -524,7 +524,7 @@ export namespace InvalidCommandId {
  * The following problems can cause this exception:
  *
- *   - You don't have permission to access the instance.
+ *   - You don't have permission to access the managed node.
  *
  *   - Amazon Web Services Systems Manager Agent (SSM Agent) isn't running. Verify that SSM Agent is
@@ -534,7 +534,7 @@ export namespace InvalidCommandId {
  *   - SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM Agent.
  *
- *   - The instance isn't in valid state. Valid states are: Running,
+ *   - The managed node isn't in valid state. Valid states are: Running,
  *     Pending, Stopped, and Stopping. Invalid states are:
  *     Shutting-down and Terminated.
@@ -642,8 +642,8 @@ export interface CreateActivationRequest {
   Description?: string;

   /**

-  * The name of the registered, managed instance as it will appear in the Amazon Web Services Systems Manager console or
-  * when you use the Amazon Web Services command line tools to list Systems Manager resources.
+  * The name of the registered, managed node as it will appear in the Amazon Web Services Systems Manager console or when
+  * you use the Amazon Web Services command line tools to list Systems Manager resources.
   *
   * Don't enter personally identifiable information in this field.
   */
@@ -652,7 +652,7 @@ export interface CreateActivationRequest {
  /**
   * The name of the Identity and Access Management (IAM) role that you want to assign to
-  * the managed instance. This IAM role must provide AssumeRole permissions for the
+  * the managed node. This IAM role must provide AssumeRole permissions for the
   * Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an
   * IAM service role for a hybrid environment in the
   * Amazon Web Services Systems Manager User Guide.
@@ -660,7 +660,7 @@ export interface CreateActivationRequest {
  IamRole: string | undefined;

  /**
-  * Specify the maximum number of managed instances you want to register. The default value is
+  * Specify the maximum number of managed nodes you want to register. The default value is
   * 1.
   */
  RegistrationLimit?: number;
@@ -695,10 +695,10 @@ export interface CreateActivationRequest {
   * automatically applied to the on-premises servers or VMs.
   *
   * You can't add tags to or delete tags from an existing activation. You can tag your
-  * on-premises servers and VMs after they connect to Systems Manager for the first time and are assigned a
-  * managed instance ID. This means they are listed in the Amazon Web Services Systems Manager console with an ID that is
-  * prefixed with "mi-". For information about how to add tags to your managed instances, see AddTagsToResource. For information about how to remove tags from your managed
-  * instances, see RemoveTagsFromResource.
+  * on-premises servers, edge devices, and VMs after they connect to Systems Manager for the first time and are assigned a
+  * managed node ID. This means they are listed in the Amazon Web Services Systems Manager console with an ID that is
+  * prefixed with "mi-". For information about how to add tags to your managed nodes, see AddTagsToResource. For information about how to remove tags from your managed nodes,
+  * see RemoveTagsFromResource.
   */
  Tags?: Tag[];
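Pulling the fields above together, a minimal sketch of creating an activation for hybrid nodes with the v3 client; the role name, node name, and tags are placeholders:

```ts
import { SSMClient, CreateActivationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// Create an activation that up to 10 on-premises servers, edge devices, or VMs can use to register.
async function createActivation() {
  const { ActivationId, ActivationCode } = await client.send(
    new CreateActivationCommand({
      Description: "Activation for data-center web servers",
      DefaultInstanceName: "webserver",        // name shown for each registered managed node
      IamRole: "MyHybridSsmServiceRole",       // role that trusts ssm.amazonaws.com
      RegistrationLimit: 10,
      Tags: [{ Key: "Environment", Value: "Production" }],
    })
  );
  console.log(ActivationId, ActivationCode);   // pass these to SSM Agent during install
}
```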

@@ -900,7 +900,7 @@ export namespace TargetLocation {
 }

 /**
- * An array of search criteria that targets instances using a key-value pair that you
+ * An array of search criteria that targets managed nodes using a key-value pair that you
  * specify.
  *
  * One or more targets must be specified for maintenance window Run Command-type tasks.
@@ -914,41 +914,35 @@ export namespace TargetLocation {
  *
- *    Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
- *
+ *    Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>,<instance-id-3>
  *
- *    Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2
- *
+ *    Key=tag:<my-tag-key>,Values=<my-tag-value-1>,<my-tag-value-2>
  *
- *    Key=tag-key,Values=my-tag-key-1,my-tag-key-2
- *
+ *    Key=tag-key,Values=<my-tag-key-1>,<my-tag-key-2>
  *
  *    Run Command and Maintenance window targets only:
- *    Key=resource-groups:Name,Values=resource-group-name
- *
+ *    Key=resource-groups:Name,Values=<resource-group-name>
  *
  *    Maintenance window targets only:
- *    Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2
- *
+ *    Key=resource-groups:ResourceTypeFilters,Values=<resource-type-1>,<resource-type-2>
  *
  *    Automation targets only:
- *    Key=ResourceGroup;Values=resource-group-name
- *
+ *    Key=ResourceGroup;Values=<resource-group-name>
  *
@@ -979,8 +973,7 @@ export namespace TargetLocation {
  *    Maintenance window targets only:
- *    Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC
- *
+ *    Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC
  *
  * This example demonstrates how to target only Amazon Elastic Compute Cloud (Amazon EC2)
  * instances and VPCs in your maintenance window.
@@ -994,19 +987,18 @@ export namespace TargetLocation {
  *
  *    State Manager association targets only:
- *    Key=InstanceIds,Values=*
- *
+ *    Key=InstanceIds,Values=*
  *
  * This example demonstrates how to target all managed instances in the Amazon Web Services Region where
  * the association was created.
  *
- * For more information about how to send commands that target instances using
+ * For more information about how to send commands that target managed nodes using
  * Key,Value parameters, see Targeting multiple instances in the Amazon Web Services Systems Manager User Guide.
  */
 export interface Target {
   /**
-  * User-defined criteria for sending commands that target instances that meet the
+  * User-defined criteria for sending commands that target managed nodes that meet the
   * criteria.
   */
  Key?: string;
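As an illustration of the Key/Values pairs listed above, a minimal sketch of sending a command to managed nodes selected by tag; the document name and tag values are placeholders:

```ts
import { SSMClient, SendCommandCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// Target managed nodes by tag rather than by individual node IDs.
async function runByTag() {
  const { Command } = await client.send(
    new SendCommandCommand({
      DocumentName: "AWS-RunShellScript",
      Targets: [{ Key: "tag:Environment", Values: ["Production"] }],
      Parameters: { commands: ["uptime"] },
      MaxConcurrency: "10%",
      MaxErrors: "1",
    })
  );
  console.log(Command?.CommandId);
}
```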

@@ -1033,7 +1025,7 @@ export namespace Target {
 export interface CreateAssociationRequest {
   /**
   * The name of the SSM Command document or Automation runbook that contains the configuration
-  * information for the instance.
+  * information for the managed node.
   *
   * You can specify Amazon Web Services-predefined documents, documents you created, or a document that is
   * shared with you from another account.
   *
   * For Systems Manager documents (SSM documents) that are shared with you from other Amazon Web Services accounts, you
@@ -1059,10 +1051,10 @@ export interface CreateAssociationRequest {
  DocumentVersion?: string;

  /**
-  * The instance ID.
+  * The managed node ID.
   *
-  * InstanceId has been deprecated. To specify an instance ID for an association,
+  * InstanceId has been deprecated. To specify a managed node ID for an association,
   * use the Targets parameter. Requests that include the
   * parameter InstanceID with Systems Manager documents (SSM documents) that use schema version
   * 2.0 or later will fail. In addition, if you use the parameter
@@ -1080,9 +1072,9 @@ export interface CreateAssociationRequest {
  Parameters?: { [key: string]: string[] };

  /**
-  * The targets for the association. You can target instances by using tags, Amazon Web Services resource
-  * groups, all instances in an Amazon Web Services account, or individual instance IDs. You can target all
-  * instances in an Amazon Web Services account by specifying the InstanceIds key with a value of
+  * The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource
+  * groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all
+  * managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of
   * *. For more information about choosing targets for an association, see Using targets and rate controls with State Manager associations in the
   * Amazon Web Services Systems Manager User Guide.
   */
@@ -1117,8 +1109,8 @@ export interface CreateAssociationRequest {
   * example 10, or a percentage of the target set, for example 10%. If you specify 3, for example,
   * the system stops sending requests when the fourth error is received. If you specify 0, then the
   * system stops sending requests after the first error is returned. If you run an association on 50
-  * instances and set MaxError to 10%, then the system stops sending the request when
-  * the sixth error is received.
+  * managed nodes and set MaxError to 10%, then the system stops sending the request
+  * when the sixth error is received.
   *
   * Executions that are already running an association when MaxErrors is reached
   * are allowed to complete, but some of these executions may fail as well. If you need to ensure
   * that there won't be more than max-errors failed executions, set MaxConcurrency to 1
@@ -1130,9 +1122,9 @@ export interface CreateAssociationRequest {
   * The maximum number of targets allowed to run the association at the same time. You can
   * specify a number, for example 10, or a percentage of the target set, for example 10%. The default
   * value is 100%, which means all targets run the association at the same time.
-  * If a new instance starts and attempts to run an association while Systems Manager is running
+  * If a new managed node starts and attempts to run an association while Systems Manager is running
   * MaxConcurrency associations, the association is allowed to run. During the next
-  * association interval, the new instance will process its association within the limit specified
+  * association interval, the new managed node will process its association within the limit specified
   * for MaxConcurrency.

   */
  MaxConcurrency?: string;
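To ground the rate-control fields above, a minimal sketch of creating a tag-targeted association with MaxErrors and MaxConcurrency set; the document, tag, and schedule values are placeholders:

```ts
import { SSMClient, CreateAssociationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// Associate a document with all managed nodes carrying a tag, capping concurrency and errors.
async function createAssociation() {
  const { AssociationDescription } = await client.send(
    new CreateAssociationCommand({
      Name: "AWS-UpdateSSMAgent",
      Targets: [{ Key: "tag:Environment", Values: ["Production"] }],
      ScheduleExpression: "rate(30 minutes)",
      MaxConcurrency: "10%", // at most 10% of targets run the association at once
      MaxErrors: "10%",      // stop scheduling new runs after 10% of targets fail
    })
  );
  console.log(AssociationDescription?.AssociationId);
}
```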

@@ -1205,8 +1197,8 @@ export interface AssociationOverview {
  /**
   * Returns the number of targets for the association status. For example, if you created an
-  * association with two instances, and one of them was successful, this would return the count of
-  * instances by status.
+  * association with two managed nodes, and one of them was successful, this would return the count
+  * of managed nodes by status.
   */
  AssociationStatusAggregatedCount?: { [key: string]: number };
 }
@@ -1270,7 +1262,7 @@ export interface AssociationDescription {
  Name?: string;

  /**
-  * The instance ID.
+  * The managed node ID.
   */
  InstanceId?: string;
@@ -1322,7 +1314,7 @@ export interface AssociationDescription {
  AssociationId?: string;

  /**
-  * The instances targeted by the request.
+  * The managed nodes targeted by the request.
   */
  Targets?: Target[];
@@ -1357,8 +1349,8 @@ export interface AssociationDescription {
   * example 10, or a percentage of the target set, for example 10%. If you specify 3, for example,
   * the system stops sending requests when the fourth error is received. If you specify 0, then the
   * system stops sending requests after the first error is returned. If you run an association on 50
-  * instances and set MaxError to 10%, then the system stops sending the request when
-  * the sixth error is received.
+  * managed nodes and set MaxError to 10%, then the system stops sending the request
+  * when the sixth error is received.
   *
   * Executions that are already running an association when MaxErrors is reached
   * are allowed to complete, but some of these executions may fail as well. If you need to ensure
   * that there won't be more than max-errors failed executions, set MaxConcurrency to 1
@@ -1370,9 +1362,9 @@ export interface AssociationDescription {
   * The maximum number of targets allowed to run the association at the same time. You can
   * specify a number, for example 10, or a percentage of the target set, for example 10%. The default
   * value is 100%, which means all targets run the association at the same time.
-  * If a new instance starts and attempts to run an association while Systems Manager is running
+  * If a new managed node starts and attempts to run an association while Systems Manager is running
   * MaxConcurrency associations, the association is allowed to run. During the next
-  * association interval, the new instance will process its association within the limit specified
+  * association interval, the new managed node will process its association within the limit specified
   * for MaxConcurrency.
   */
  MaxConcurrency?: string;
@@ -1543,8 +1535,8 @@ export namespace InvalidTarget {
 }

 /**
- * The document doesn't support the platform type of the given instance ID(s). For example, you
- * sent an document for a Windows instance to a Linux instance.
+ * The document doesn't support the platform type of the given managed node ID(s). For example, you
+ * sent an document for a Windows managed node to a Linux node.
  */
 export interface UnsupportedPlatformType extends __SmithyException, $MetadataBearer {
   name: "UnsupportedPlatformType";
@@ -1562,11 +1554,11 @@ export namespace UnsupportedPlatformType {
 }

 /**
- * Describes the association of a Amazon Web Services Systems Manager document (SSM document) and an instance.
+ * Describes the association of a Amazon Web Services Systems Manager document (SSM document) and a managed node.
  */
 export interface CreateAssociationBatchRequestEntry {
   /**
-  * The name of the SSM document that contains the configuration information for the instance.
+  * The name of the SSM document that contains the configuration information for the managed node.
   * You can specify Command or Automation runbooks.
   *
   * You can specify Amazon Web Services-predefined documents, documents you created, or a document that is
   * shared with you from another account.
@@ -1587,10 +1579,10 @@ export interface CreateAssociationBatchRequestEntry {
  Name: string | undefined;

  /**
-  * The instance ID.
+  * The managed node ID.
   *
-  * InstanceId has been deprecated. To specify an instance ID for an association,
+  * InstanceId has been deprecated. To specify a managed node ID for an association,
   * use the Targets parameter. Requests that include the
   * parameter InstanceID with Systems Manager documents (SSM documents) that use schema version
   * 2.0 or later will fail. In addition, if you use the parameter
@@ -1620,7 +1612,7 @@ export interface CreateAssociationBatchRequestEntry {
  DocumentVersion?: string;

  /**
-  * The instances targeted by the request.
+  * The managed nodes targeted by the request.
   */
  Targets?: Target[];
@@ -1645,8 +1637,8 @@ export interface CreateAssociationBatchRequestEntry {
   * example 10, or a percentage of the target set, for example 10%. If you specify 3, for example,
   * the system stops sending requests when the fourth error is received. If you specify 0, then the
   * system stops sending requests after the first error is returned. If you run an association on 50
-  * instances and set MaxError to 10%, then the system stops sending the request when
-  * the sixth error is received.
+  * managed nodes and set MaxError to 10%, then the system stops sending the request
+  * when the sixth error is received.
   *
   * Executions that are already running an association when MaxErrors is reached
   * are allowed to complete, but some of these executions may fail as well. If you need to ensure
   * that there won't be more than max-errors failed executions, set MaxConcurrency to 1
@@ -1658,9 +1650,9 @@ export interface CreateAssociationBatchRequestEntry {
   * The maximum number of targets allowed to run the association at the same time. You can
   * specify a number, for example 10, or a percentage of the target set, for example 10%. The default
   * value is 100%, which means all targets run the association at the same time.
-  * If a new instance starts and attempts to run an association while Systems Manager is running
+  * If a new managed node starts and attempts to run an association while Systems Manager is running
   * MaxConcurrency associations, the association is allowed to run. During the next
-  * association interval, the new instance will process its association within the limit specified
+  * association interval, the new managed node will process its association within the limit specified
   * for MaxConcurrency.
   */
  MaxConcurrency?: string;
@@ -2109,6 +2101,7 @@ export namespace DocumentParameter {
 export enum PlatformType {
   LINUX = "Linux",
+  MACOS = "MacOS",
   WINDOWS = "Windows",
 }

@@ -2497,9 +2490,9 @@ export interface CreateMaintenanceWindowRequest {
  Cutoff: number | undefined;

  /**
-  * Enables a maintenance window task to run on managed instances, even if you haven't
-  * registered those instances as targets. If enabled, then you must specify the unregistered
-  * instances (by instance ID) when you register a task with the maintenance window.
+  * Enables a maintenance window task to run on managed nodes, even if you haven't registered
+  * those nodes as targets. If enabled, then you must specify the unregistered managed nodes (by
+  * node ID) when you register a task with the maintenance window.
   *
   * If you don't enable this option, then you must specify previously-registered targets when
   * you register a task with the maintenance window.
   */
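For context on the flag above, a minimal sketch of creating a maintenance window that allows unregistered managed nodes as targets; the name and schedule are placeholders:

```ts
import { SSMClient, CreateMaintenanceWindowCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// A weekly 3-hour window; tasks may target managed nodes that were never registered as window targets.
async function createWindow() {
  const { WindowId } = await client.send(
    new CreateMaintenanceWindowCommand({
      Name: "weekly-patching",
      Schedule: "cron(0 2 ? * SUN *)",
      Duration: 3,                  // hours
      Cutoff: 1,                    // stop starting new tasks 1 hour before the end
      AllowUnassociatedTargets: true,
    })
  );
  console.log(WindowId);
}
```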

@@ -3110,9 +3103,9 @@ export interface PatchRule {
  ApproveUntilDate?: string;

  /**
-  * For instances identified by the approval rule filters, enables a patch baseline to apply
+  * For managed nodes identified by the approval rule filters, enables a patch baseline to apply
   * non-security updates available in the specified repository. The default value is
-  * false. Applies to Linux instances only.
+  * false. Applies to Linux managed nodes only.
   */
  EnableNonSecurity?: boolean;
 }
@@ -3165,8 +3158,8 @@ export enum PatchAction {
 }

 /**
- * Information about the patches to use to update the instances, including target operating
- * systems and source repository. Applies to Linux instances only.
+ * Information about the patches to use to update the managed nodes, including target operating
+ * systems and source repository. Applies to Linux managed nodes only.
  */
 export interface PatchSource {
   /**
@@ -3252,8 +3245,8 @@ export interface CreatePatchBaselineRequest {
  /**
   * Indicates whether the list of approved patches includes non-security updates that should be
-  * applied to the instances. The default value is false. Applies to Linux instances
-  * only.
+  * applied to the managed nodes. The default value is false. Applies to Linux managed
+  * nodes only.
   */
  ApprovedPatchesEnableNonSecurity?: boolean;
@@ -3298,8 +3291,8 @@ export interface CreatePatchBaselineRequest {
  Description?: string;

  /**
-  * Information about the patches to use to update the instances, including target operating
-  * systems and source repositories. Applies to Linux instances only.
+  * Information about the patches to use to update the managed nodes, including target operating
+  * systems and source repositories. Applies to Linux managed nodes only.
   */
  Sources?: PatchSource[];
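A minimal sketch of creating a Linux patch baseline that enables non-security updates and points at a custom repository. The repository name, product string, and Configuration contents below are illustrative placeholders, not values from this patch:

```ts
import { SSMClient, CreatePatchBaselineCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// Linux-only options: non-security updates plus a custom yum repository as a patch source.
async function createBaseline() {
  const { BaselineId } = await client.send(
    new CreatePatchBaselineCommand({
      Name: "amazon-linux-2-custom",
      OperatingSystem: "AMAZON_LINUX_2",
      ApprovedPatchesEnableNonSecurity: true,
      Sources: [
        {
          Name: "internal-repo",
          Products: ["AmazonLinux2"],
          Configuration: "[internal-repo]\nname=Internal\nbaseurl=https://repo.example.com\nenabled=1",
        },
      ],
    })
  );
  console.log(BaselineId);
}
```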

@@ -3716,10 +3709,10 @@ export interface DeleteAssociationRequest {
  Name?: string;

  /**
-  * The instance ID.
+  * The managed node ID.
   *
-  * InstanceId has been deprecated. To specify an instance ID for an association,
+  * InstanceId has been deprecated. To specify a managed node ID for an association,
   * use the Targets parameter. Requests that include the
   * parameter InstanceID with Systems Manager documents (SSM documents) that use schema version
   * 2.0 or later will fail. In addition, if you use the parameter
@@ -3758,7 +3751,7 @@ export namespace DeleteAssociationResult {
 }

 /**
- * You must disassociate a document from all instances before you can delete it.
+ * You must disassociate a document from all managed nodes before you can delete it.
  */
 export interface AssociatedInstances extends __SmithyException, $MetadataBearer {
   name: "AssociatedInstances";
@@ -4312,7 +4305,7 @@ export namespace ResourceDataSyncNotFoundException {
 export interface DeregisterManagedInstanceRequest {
   /**
-  * The ID assigned to the managed instance when you registered it using the activation process.
+  * The ID assigned to the managed node when you registered it using the activation process.
   */
  InstanceId: string | undefined;
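A minimal sketch of deregistering a hybrid-activated managed node by its mi- ID (placeholder value):

```ts
import { SSMClient, DeregisterManagedInstanceCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// Remove a hybrid-activated managed node from Systems Manager.
async function deregister() {
  await client.send(
    new DeregisterManagedInstanceCommand({ InstanceId: "mi-1a2b3c4d5e6f" })
  );
}
```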

@@ -4612,7 +4605,7 @@ export interface DescribeAssociationRequest {
  Name?: string;

  /**
-  * The instance ID.
+  * The managed node ID.
   */
  InstanceId?: string;
@@ -4624,7 +4617,7 @@ export interface DescribeAssociationRequest {
  /**
   * Specify the association version to retrieve. To view the latest version, either specify
   * $LATEST for this parameter, or omit this parameter. To view a list of all
-  * associations for an instance, use ListAssociations. To get a list of versions
+  * associations for a managed node, use ListAssociations. To get a list of versions
   * for a specific association, use ListAssociationVersions.
   */
  AssociationVersion?: string;
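As an illustration of the $LATEST convention above, a minimal sketch that fetches the latest version of an association; the association ID is a placeholder:

```ts
import { SSMClient, DescribeAssociationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// Retrieve the most recent version of an association by ID.
async function describeLatest() {
  const { AssociationDescription } = await client.send(
    new DescribeAssociationCommand({
      AssociationId: "b85ccafe-example-association-id",
      AssociationVersion: "$LATEST",
    })
  );
  console.log(AssociationDescription?.AssociationVersion);
}
```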

@@ -4962,12 +4955,12 @@ export interface AssociationExecutionTarget {
  ExecutionId?: string;

  /**
-  * The resource ID, for example, the instance ID where the association ran.
+  * The resource ID, for example, the managed node ID where the association ran.
   */
  ResourceId?: string;

  /**
-  * The resource type, for example, instance.
+  * The resource type, for example, EC2.
   */
  ResourceType?: string;
@@ -5772,7 +5765,7 @@ export interface DescribeAvailablePatchesRequest {
   *    Windows Server
   *
-  * Supported keys for Windows Server instance patches include the following:
+  * Supported keys for Windows Server managed node patches include the following:
   *
@@ -5848,7 +5841,7 @@ export interface DescribeAvailablePatchesRequest {
   *    Key=CVE_ID,Values=CVE-2018-3615
   *
-  * Supported keys for Linux instance patches include the following:
+  * Supported keys for Linux managed node patches include the following:
   *
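Reusing the CVE_ID key shown in the excerpt above, a minimal sketch of querying available patches with a filter:

```ts
import { SSMClient, DescribeAvailablePatchesCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// List available patches that address a specific CVE.
async function findPatchesForCve() {
  const { Patches } = await client.send(
    new DescribeAvailablePatchesCommand({
      Filters: [{ Key: "CVE_ID", Values: ["CVE-2018-3615"] }],
      MaxResults: 10,
    })
  );
  for (const patch of Patches ?? []) {
    console.log(patch.Id, patch.Title);
  }
}
```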

@@ -6054,52 +6047,52 @@ export interface Patch {
  /**
   * The Advisory ID of the patch. For example, RHSA-2020:3779. Applies to
-  * Linux-based instances only.
+  * Linux-based managed nodes only.
   */
  AdvisoryIds?: string[];

  /**
   * The Bugzilla ID of the patch. For example, 1600646. Applies to Linux-based
-  * instances only.
+  * managed nodes only.
   */
  BugzillaIds?: string[];

  /**
   * The Common Vulnerabilities and Exposures (CVE) ID of the patch. For example,
-  * CVE-2011-3192. Applies to Linux-based instances only.
+  * CVE-2011-3192. Applies to Linux-based managed nodes only.
   */
  CVEIds?: string[];

  /**
-  * The name of the patch. Applies to Linux-based instances only.
+  * The name of the patch. Applies to Linux-based managed nodes only.
   */
  Name?: string;

  /**
   * The epoch of the patch. For example in
   * pkg-example-EE-20180914-2.2.amzn1.noarch, the epoch value is
-  * 20180914-2. Applies to Linux-based instances only.
+  * 20180914-2. Applies to Linux-based managed nodes only.
   */
  Epoch?: number;

  /**
   * The version number of the patch. For example, in
   * example-pkg-1.710.10-2.7.abcd.x86_64, the version number is indicated by
-  * -1. Applies to Linux-based instances only.
+  * -1. Applies to Linux-based managed nodes only.
   */
  Version?: string;

  /**
   * The particular release of a patch. For example, in
   * pkg-example-EE-20180914-2.2.amzn1.noarch, the release is 2.amaz1.
-  * Applies to Linux-based instances only.
+  * Applies to Linux-based managed nodes only.
   */
  Release?: string;

  /**
   * The architecture of the patch. For example, in
   * example-pkg-0.710.10-2.7.abcd.x86_64, the architecture is indicated by
-  * x86_64. Applies to Linux-based instances only.
+  * x86_64. Applies to Linux-based managed nodes only.
   */
  Arch?: string;
@@ -6112,7 +6105,7 @@ export interface Patch {
  /**
   * The source patch repository for the operating system and version, such as
   * trusty-security for Ubuntu Server 14.04 LTE and focal-security for
-  * Ubuntu Server 20.04 LTE. Applies to Linux-based instances only.
+  * Ubuntu Server 20.04 LTE. Applies to Linux-based managed nodes only.
   */
  Repository?: string;
 }

@@ -6281,7 +6274,7 @@ export namespace InvalidPermissionType {
 export interface DescribeEffectiveInstanceAssociationsRequest {
   /**
-  * The instance ID for which you want to view all associations.
+  * The managed node ID for which you want to view all associations.
   */
  InstanceId: string | undefined;
@@ -6308,7 +6301,7 @@ export namespace DescribeEffectiveInstanceAssociationsRequest {
 }

 /**
- * One or more association documents on the instance.
+ * One or more association documents on the managed node.
  */
 export interface InstanceAssociation {
   /**
@@ -6317,17 +6310,17 @@ export interface InstanceAssociation {
  AssociationId?: string;

  /**
-  * The instance ID.
+  * The managed node ID.
   */
  InstanceId?: string;

  /**
-  * The content of the association document for the instance(s).
+  * The content of the association document for the managed node(s).
   */
  Content?: string;

  /**
-  * Version information for the association on the instance.
+  * Version information for the association on the managed node.
   */
  AssociationVersion?: string;
 }
@@ -6343,7 +6336,7 @@ export namespace InstanceAssociation {
 export interface DescribeEffectiveInstanceAssociationsResult {
   /**
-  * The associations for the requested instance.
+  * The associations for the requested managed node.
   */
  Associations?: InstanceAssociation[];
@@ -6501,7 +6494,7 @@ export namespace UnsupportedOperatingSystem {
 export interface DescribeInstanceAssociationsStatusRequest {
   /**
-  * The instance IDs for which you want association status information.
+  * The managed node IDs for which you want association status information.
   */
  InstanceId: string | undefined;
@@ -6567,7 +6560,7 @@ export namespace InstanceAssociationOutputUrl {
 }

 /**
- * Status information about the instance association.
+ * Status information about the association.
  */
 export interface InstanceAssociationStatusInfo {
   /**
@@ -6586,27 +6579,27 @@ export interface InstanceAssociationStatusInfo {
  DocumentVersion?: string;

  /**
-  * The version of the association applied to the instance.
+  * The version of the association applied to the managed node.
   */
  AssociationVersion?: string;

  /**
-  * The instance ID where the association was created.
+  * The managed node ID where the association was created.
   */
  InstanceId?: string;

  /**
-  * The date the instance association ran.
+  * The date the association ran.
   */
  ExecutionDate?: Date;

  /**
-  * Status information about the instance association.
+  * Status information about the association.
   */
  Status?: string;

  /**
-  * Detailed status information about the instance association.
+  * Detailed status information about the association.
   */
  DetailedStatus?: string;
@@ -6626,7 +6619,7 @@ export interface InstanceAssociationStatusInfo {
  OutputUrl?: InstanceAssociationOutputUrl;

  /**
-  * The name of the association applied to the instance.
+  * The name of the association applied to the managed node.
   */
  AssociationName?: string;
 }
@@ -6663,11 +6656,11 @@ export namespace DescribeInstanceAssociationsStatusResult {
 }

 /**
- * The filters to describe or get information about your managed instances.
+ * The filters to describe or get information about your managed nodes.
  */
 export interface InstanceInformationStringFilter {
   /**
-  * The filter key name to describe your instances. For example:
+  * The filter key name to describe your managed nodes. For example:
   *
   * "InstanceIds"|"AgentVersion"|"PingStatus"|"PlatformTypes"|"ActivationIds"|"IamRole"|"ResourceType"|"AssociationStatus"|"Tag
   * Key"
   *
@@ -6707,7 +6700,7 @@ export enum InstanceInformationFilterKey {
 }

 /**
- * Describes a filter for a specific list of instances. You can filter instances information by
+ * Describes a filter for a specific list of managed nodes. You can filter node information by
  * using tags. You specify tags by using a key-value mapping.
  *
  * Use this operation instead of the DescribeInstanceInformationRequest$InstanceInformationFilterList method. The
  * InstanceInformationFilterList method is a legacy method and doesn't support tags.
@@ -6737,8 +6730,8 @@ export namespace InstanceInformationFilter {
 export interface DescribeInstanceInformationRequest {
   /**
   * This is a legacy method. We recommend that you don't use this method. Instead, use the
-  * Filters data type. Filters enables you to return instance information
-  * by filtering based on tags applied to managed instances.
+  * Filters data type. Filters enables you to return node information
+  * by filtering based on tags applied to managed nodes.
   *
   * Attempting to use InstanceInformationFilterList and Filters leads
   * to an exception error.
@@ -6747,7 +6740,7 @@ export interface DescribeInstanceInformationRequest {
  InstanceInformationFilterList?: InstanceInformationFilter[];

  /**
-  * One or more filters. Use a filter to return a more specific list of instances. You can
+  * One or more filters. Use a filter to return a more specific list of managed nodes. You can
   * filter based on tags applied to EC2 instances. Use this Filters data type instead of
   * InstanceInformationFilterList, which is deprecated.
   */
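As an illustration of the tag-based Filters described above, a minimal sketch that lists managed nodes carrying a given tag; the tag key and value are placeholders:

```ts
import { SSMClient, DescribeInstanceInformationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

// List managed nodes filtered by tag, printing basic identity and platform details.
async function listNodesByTag() {
  const { InstanceInformationList } = await client.send(
    new DescribeInstanceInformationCommand({
      Filters: [{ Key: "tag:Environment", Values: ["Production"] }],
      MaxResults: 50,
    })
  );
  for (const node of InstanceInformationList ?? []) {
    console.log(node.InstanceId, node.PingStatus, node.PlatformName);
  }
}
```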

@@ -6785,7 +6778,7 @@ export interface InstanceAggregatedAssociationOverview {
  DetailedStatus?: string;

  /**
-  * The number of associations for the instance(s).
+  * The number of associations for the managed node(s).
   */
  InstanceAssociationStatusAggregatedCount?: { [key: string]: number };
 }
@@ -6811,12 +6804,18 @@ export enum ResourceType {
   MANAGED_INSTANCE = "ManagedInstance",
 }

+export enum SourceType {
+  AWS_EC2_INSTANCE = "AWS::EC2::Instance",
+  AWS_IOT_THING = "AWS::IoT::Thing",
+  AWS_SSM_MANAGEDINSTANCE = "AWS::SSM::ManagedInstance",
+}
+
 /**
- * Describes a filter for a specific list of instances.
+ * Describes a filter for a specific list of managed nodes.
  */
 export interface InstanceInformation {
   /**
-  * The instance ID.
+  * The managed node ID.
   */
  InstanceId?: string;
@@ -6834,15 +6833,15 @@ export interface InstanceInformation {
  LastPingDateTime?: Date;

  /**
-  * The version of SSM Agent running on your Linux instance.
+  * The version of SSM Agent running on your Linux managed node.
   */
  AgentVersion?: string;

  /**
-  * Indicates whether the latest version of SSM Agent is running on your Linux Managed Instance.
-  * This field doesn't indicate whether or not the latest version is installed on Windows managed
-  * instances, because some older versions of Windows Server use the EC2Config service to process
-  * Systems Manager requests.
+  * Indicates whether the latest version of SSM Agent is running on your Linux managed node. This
+  * field doesn't indicate whether or not the latest version is installed on Windows managed nodes,
+  * because some older versions of Windows Server use the EC2Config service to process Systems Manager
+  * requests.
   */
  IsLatestVersion?: boolean;
@@ -6852,12 +6851,12 @@ export interface InstanceInformation {
  PlatformType?: PlatformType | string;

  /**
-  * The name of the operating system platform running on your instance.
+  * The name of the operating system platform running on your managed node.
   */
  PlatformName?: string;

  /**
-  * The version of the OS platform running on your instance.
+  * The version of the OS platform running on your managed node.
   */
  PlatformVersion?: string;
@@ -6869,7 +6868,7 @@ export interface InstanceInformation {
  /**
   * The Identity and Access Management (IAM) role assigned to the on-premises Systems Manager
-  * managed instance. This call doesn't return the IAM role for Amazon Elastic Compute Cloud
+  * managed node. This call doesn't return the IAM role for Amazon Elastic Compute Cloud
   * (Amazon EC2) instances. To retrieve the IAM role for an EC2 instance, use
   * the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in
   * the Amazon Web Services CLI Command Reference.
@@ -6877,7 +6876,7 @@ export interface InstanceInformation {
  IamRole?: string;

  /**
-  * The date the server or VM was registered with Amazon Web Services as a managed instance.
+  * The date the server or VM was registered with Amazon Web Services as a managed node.
   */
  RegistrationDate?: Date;
@@ -6887,26 +6886,25 @@ export interface InstanceInformation {
  ResourceType?: ResourceType | string;

  /**
-  * The name assigned to an on-premises server or virtual machine (VM) when it is activated as a
-  * Systems Manager managed instance. The name is specified as the DefaultInstanceName property
-  * using the CreateActivation command. It is applied to the managed instance by
-  * specifying the Activation Code and Activation ID when you install SSM Agent on the instance, as
+  * The name assigned to an on-premises server, edge device, or virtual machine (VM) when it is
+  * activated as a Systems Manager managed node. The name is specified as the DefaultInstanceName
+  * property using the CreateActivation command. It is applied to the managed node
+  * by specifying the Activation Code and Activation ID when you install SSM Agent on the node, as
   * explained in Install SSM Agent for a
   * hybrid environment (Linux) and Install SSM Agent for a
-  * hybrid environment (Windows). To retrieve the Name tag of an EC2 instance, use the Amazon EC2
-  * DescribeInstances operation. For information, see DescribeInstances in the
-  * Amazon EC2 API Reference or describe-instances in the
-  * Amazon Web Services CLI Command Reference.
+  * hybrid environment (Windows). To retrieve the Name tag of an EC2 instance,
+  * use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in
+  * the Amazon Web Services CLI Command Reference.
   */
  Name?: string;

  /**
-  * The IP address of the managed instance.
+  * The IP address of the managed node.
   */
  IPAddress?: string;

  /**
-  * The fully qualified host name of the managed instance.
+  * The fully qualified host name of the managed node.
   */
  ComputerName?: string;
@@ -6929,6 +6927,18 @@ export interface InstanceInformation {
   * Information about the association.
   */
  AssociationOverview?: InstanceAggregatedAssociationOverview;
+
+  /**
+  * The ID of the source resource. For IoT Greengrass devices, SourceId is
+  * the Thing name.
+  */
+  SourceId?: string;
+
+  /**
+  * The type of the source resource. For IoT Greengrass devices, SourceType
+  * is AWS::IoT::Thing.
+  */
+  SourceType?: SourceType | string;
 }

 export namespace InstanceInformation {
@@ -6942,7 +6952,7 @@ export namespace InstanceInformation {
 export interface DescribeInstanceInformationResult {
   /**
-  *

                                                              The instance information list.

                                                              + *

                                                              The managed node information list.

                                                              */ InstanceInformationList?: InstanceInformation[]; @@ -6982,7 +6992,7 @@ export namespace InvalidInstanceInformationFilterValue { export interface DescribeInstancePatchesRequest { /** - *
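As an illustration of the `InstanceInformation` changes recorded above (the instance-to-managed-node wording and the new `SourceId`/`SourceType` members), here is a minimal sketch of reading those fields through the v3 client. It is not part of the upstream patch; the region, filter, and `MaxResults` values are placeholders.

```ts
import { SSMClient, DescribeInstanceInformationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-west-2" }); // placeholder region

async function listOnlineNodes(): Promise<void> {
  // "PingStatus" is one of the documented filter keys for this API.
  const { InstanceInformationList } = await client.send(
    new DescribeInstanceInformationCommand({
      Filters: [{ Key: "PingStatus", Values: ["Online"] }],
      MaxResults: 50,
    })
  );

  for (const node of InstanceInformationList ?? []) {
    // SourceId / SourceType are the fields added in the hunk above
    // (for IoT Greengrass devices, the Thing name and AWS::IoT::Thing).
    console.log(node.InstanceId, node.PlatformName, node.SourceType, node.SourceId);
  }
}

listOnlineNodes().catch(console.error);
```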

                                                              The ID of the instance whose patch state information should be retrieved.

                                                              + *

                                                              The ID of the managed node whose patch state information should be retrieved.

                                                              */ InstanceId: string | undefined; @@ -7063,8 +7073,8 @@ export enum PatchComplianceDataState { } /** - *

                                                              Information about the state of a patch on a particular instance as it relates to the patch - * baseline used to patch the instance.

                                                              + *

                                                              Information about the state of a patch on a particular managed node as it relates to the patch + * baseline used to patch the node.

                                                              */ export interface PatchComplianceData { /** @@ -7090,13 +7100,13 @@ export interface PatchComplianceData { Severity: string | undefined; /** - *

                                                              The state of the patch on the instance, such as INSTALLED or FAILED.

                                                              + *

                                                              The state of the patch on the managed node, such as INSTALLED or FAILED.

                                                              *

                                                              For descriptions of each patch state, see About patch compliance in the Amazon Web Services Systems Manager User Guide.

                                                              */ State: PatchComplianceDataState | string | undefined; /** - *

                                                              The date/time the patch was installed on the instance. Not all operating systems provide + *

                                                              The date/time the patch was installed on the managed node. Not all operating systems provide * this level of information.

                                                              */ InstalledTime: Date | undefined; @@ -7164,7 +7174,7 @@ export namespace DescribeInstancePatchesResult { export interface DescribeInstancePatchStatesRequest { /** - *

                                                              The ID of the instance for which patch state information should be retrieved.

                                                              + *

                                                              The ID of the managed node for which patch state information should be retrieved.

                                                              */ InstanceIds: string[] | undefined; @@ -7175,7 +7185,7 @@ export interface DescribeInstancePatchStatesRequest { NextToken?: string; /** - *

                                                              The maximum number of instances to return (per page).

                                                              + *

                                                              The maximum number of managed nodes to return (per page).

                                                              */ MaxResults?: number; } @@ -7200,24 +7210,24 @@ export enum RebootOption { } /** - *

                                                              Defines the high-level patch compliance state for a managed instance, providing information + *

                                                              Defines the high-level patch compliance state for a managed node, providing information * about the number of installed, missing, not applicable, and failed patches along with metadata - * about the operation when this information was gathered for the instance.

                                                              + * about the operation when this information was gathered for the managed node.

                                                              */ export interface InstancePatchState { /** - *

                                                              The ID of the managed instance the high-level patch compliance information was collected + *

                                                              The ID of the managed node the high-level patch compliance information was collected * for.

                                                              */ InstanceId: string | undefined; /** - *

                                                              The name of the patch group the managed instance belongs to.

                                                              + *

                                                              The name of the patch group the managed node belongs to.

                                                              */ PatchGroup: string | undefined; /** - *

                                                              The ID of the patch baseline used to patch the instance.

                                                              + *

                                                              The ID of the patch baseline used to patch the managed node.

                                                              */ BaselineId: string | undefined; @@ -7246,24 +7256,24 @@ export interface InstancePatchState { OwnerInformation?: string; /** - *

                                                              The number of patches from the patch baseline that are installed on the instance.

                                                              + *

                                                              The number of patches from the patch baseline that are installed on the managed node.

                                                              */ InstalledCount?: number; /** *

                                                              The number of patches not specified in the patch baseline that are installed on the - * instance.

                                                              + * managed node.

                                                              */ InstalledOtherCount?: number; /** - *

                                                              The number of patches installed by Patch Manager since the last time the instance was + *

                                                              The number of patches installed by Patch Manager since the last time the managed node was * rebooted.

                                                              */ InstalledPendingRebootCount?: number; /** - *

                                                              The number of patches installed on an instance that are specified in a + *

                                                              The number of patches installed on a managed node that are specified in a * RejectedPatches list. Patches with a status of InstalledRejected were * typically installed before they were added to a RejectedPatches list.

                                                              * @@ -7275,7 +7285,7 @@ export interface InstancePatchState { InstalledRejectedCount?: number; /** - *

                                                              The number of patches from the patch baseline that are applicable for the instance but + *

                                                              The number of patches from the patch baseline that are applicable for the managed node but * aren't currently installed.

                                                              */ MissingCount?: number; @@ -7293,20 +7303,20 @@ export interface InstancePatchState { UnreportedNotApplicableCount?: number; /** - *

                                                              The number of patches from the patch baseline that aren't applicable for the instance and - * therefore aren't installed on the instance. This number may be truncated if the list of patch + *

                                                              The number of patches from the patch baseline that aren't applicable for the managed node and + * therefore aren't installed on the node. This number may be truncated if the list of patch * names is very large. The number of patches beyond this limit are reported in * UnreportedNotApplicableCount.

                                                              */ NotApplicableCount?: number; /** - *

                                                              The time the most recent patching operation was started on the instance.

                                                              + *

                                                              The time the most recent patching operation was started on the managed node.

                                                              */ OperationStartTime: Date | undefined; /** - *

                                                              The time the most recent patching operation completed on the instance.

                                                              + *

                                                              The time the most recent patching operation completed on the managed node.

                                                              */ OperationEndTime: Date | undefined; @@ -7326,7 +7336,7 @@ export interface InstancePatchState { Operation: PatchOperationType | string | undefined; /** - *

                                                              The time of the last attempt to patch the instance with NoReboot specified as + *

                                                              The time of the last attempt to patch the managed node with NoReboot specified as * the reboot option.

                                                              */ LastNoRebootInstallOperationTime?: Date; @@ -7340,7 +7350,7 @@ export interface InstancePatchState { *
                                                                *
                                                              • *

                                                                - * RebootIfNeeded: Patch Manager tries to reboot the instance if it installed + * RebootIfNeeded: Patch Manager tries to reboot the managed node if it installed * any patches, or if any patches are detected with a status of * InstalledPendingReboot.

                                                                *
                                                              • @@ -7356,25 +7366,25 @@ export interface InstancePatchState { RebootOption?: RebootOption | string; /** - *

                                                                The number of instances where patches that are specified as Critical for + *

                                                                The number of managed nodes where patches that are specified as Critical for * compliance reporting in the patch baseline aren't installed. These patches might be missing, have - * failed installation, were rejected, or were installed but awaiting a required instance reboot. - * The status of these instances is NON_COMPLIANT.

                                                                + * failed installation, were rejected, or were installed but awaiting a required managed node reboot. + * The status of these managed nodes is NON_COMPLIANT.

                                                                */ CriticalNonCompliantCount?: number; /** - *

                                                                The number of instances where patches that are specified as Security in a patch - * advisory aren't installed. These patches might be missing, have failed installation, were - * rejected, or were installed but awaiting a required instance reboot. The status of these - * instances is NON_COMPLIANT.

                                                                + *

                                                                The number of managed nodes where patches that are specified as Security in a + * patch advisory aren't installed. These patches might be missing, have failed installation, were + * rejected, or were installed but awaiting a required managed node reboot. The status of these managed + * nodes is NON_COMPLIANT.

                                                                */ SecurityNonCompliantCount?: number; /** - *

                                                                The number of instances with patches installed that are specified as other than + *

                                                                The number of managed nodes with patches installed that are specified as other than * Critical or Security but aren't compliant with the patch baseline. The - * status of these instances is NON_COMPLIANT.

                                                                + * status of these managed nodes is NON_COMPLIANT.

                                                                */ OtherNonCompliantCount?: number; } @@ -7391,7 +7401,7 @@ export namespace InstancePatchState { export interface DescribeInstancePatchStatesResult { /** - *
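Because `InstancePatchState` is the shape returned by `DescribeInstancePatchStates`, a hedged sketch of consuming the counters renamed in this hunk may help; the node IDs and region below are invented placeholders, not values from the patch.

```ts
import { SSMClient, DescribeInstancePatchStatesCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-west-2" }); // placeholder region

async function reportPatchCompliance(instanceIds: string[]): Promise<void> {
  const { InstancePatchStates } = await client.send(
    new DescribeInstancePatchStatesCommand({ InstanceIds: instanceIds })
  );

  for (const state of InstancePatchStates ?? []) {
    // Sum the NON_COMPLIANT buckets described in the doc comments above.
    const nonCompliant =
      (state.CriticalNonCompliantCount ?? 0) +
      (state.SecurityNonCompliantCount ?? 0) +
      (state.OtherNonCompliantCount ?? 0);
    console.log(
      `${state.InstanceId}: missing=${state.MissingCount ?? 0}, ` +
        `failed=${state.FailedCount ?? 0}, nonCompliant=${nonCompliant}`
    );
  }
}

// Placeholder managed node IDs (EC2 instance IDs or "mi-..." hybrid node IDs).
reportPatchCompliance(["i-1234567890abcdef0", "mi-0123456789abcdef0"]).catch(console.error);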

                                                                The high-level patch state for the requested instances.

                                                                + *

                                                                The high-level patch state for the requested managed nodes.

                                                                */ InstancePatchStates?: InstancePatchState[]; @@ -7425,7 +7435,7 @@ export enum InstancePatchStateOperatorType { *

                                                                Defines a filter used in DescribeInstancePatchStatesForPatchGroup to scope * down the information returned by the API.

                                                                *

                                                                - * Example: To filter for all instances in a patch group + * Example: To filter for all managed nodes in a patch group * having more than three patches with a FailedCount status, use the following for the * filter:

                                                                *
                                                                  @@ -7557,7 +7567,7 @@ export namespace DescribeInstancePatchStatesForPatchGroupRequest { export interface DescribeInstancePatchStatesForPatchGroupResult { /** - *

                                                                  The high-level patch state for the requested instances.

                                                                  + *

                                                                  The high-level patch state for the requested managed nodes.

                                                                  */ InstancePatchStates?: InstancePatchState[]; @@ -8302,7 +8312,7 @@ export interface DescribeMaintenanceWindowScheduleRequest { WindowId?: string; /** - *

                                                                  The instance ID or key-value pair to retrieve information about.

                                                                  + *

                                                                  The managed node ID or key-value pair to retrieve information about.

                                                                  */ Targets?: Target[]; @@ -8394,7 +8404,7 @@ export namespace DescribeMaintenanceWindowScheduleResult { export interface DescribeMaintenanceWindowsForTargetRequest { /** - *

                                                                  The instance ID or key-value pair to retrieve information about.

                                                                  + *

                                                                  The managed node ID or key-value pair to retrieve information about.

                                                                  */ Targets: Target[] | undefined; @@ -8452,7 +8462,7 @@ export namespace MaintenanceWindowIdentityForTarget { export interface DescribeMaintenanceWindowsForTargetResult { /** - *

                                                                  Information about the maintenance window targets and tasks an instance is associated + *

                                                                  Information about the maintenance window targets and tasks a managed node is associated * with.

                                                                  */ WindowIdentities?: MaintenanceWindowIdentityForTarget[]; @@ -8527,8 +8537,8 @@ export interface MaintenanceWindowTarget { ResourceType?: MaintenanceWindowResourceType | string; /** - *

                                                                  The targets, either instances or tags.

                                                                  - *

                                                                  Specify instances using the following format:

                                                                  + *

                                                                  The targets, either managed nodes or tags.

                                                                  + *

                                                                  Specify managed nodes using the following format:

                                                                  *

                                                                  * Key=instanceids,Values=, *

                                                                  @@ -8631,7 +8641,7 @@ export enum MaintenanceWindowTaskCutoffBehavior { /** *

                                                                  Information about an Amazon Simple Storage Service (Amazon S3) bucket to write - * instance-level logs to.

                                                                  + * managed node-level logs to.

                                                                  * *

                                                                  * LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the @@ -8715,7 +8725,7 @@ export interface MaintenanceWindowTask { Type?: MaintenanceWindowTaskType | string; /** - *

                                                                  The targets (either instances or tags). Instances are specified using + *

                                                                  The targets (either managed nodes or tags). Managed nodes are specified using * Key=instanceids,Values=,. Tags are specified * using Key=,Values=.

   */

@@ -9268,9 +9278,3 @@ export enum ParameterTier {
   INTELLIGENT_TIERING = "Intelligent-Tiering",
   STANDARD = "Standard",
 }
-
-export enum ParameterType {
-  SECURE_STRING = "SecureString",
-  STRING = "String",
-  STRING_LIST = "StringList",
-}
diff --git a/clients/client-ssm/src/models/models_1.ts b/clients/client-ssm/src/models/models_1.ts
index a01359935820..f999a9049087 100644
--- a/clients/client-ssm/src/models/models_1.ts
+++ b/clients/client-ssm/src/models/models_1.ts
@@ -33,7 +33,6 @@ import {
   ParameterInlinePolicy,
   ParameterStringFilter,
   ParameterTier,
-  ParameterType,
   PatchAction,
   PatchComplianceLevel,
   PatchFilterGroup,
@@ -54,6 +53,12 @@ import {
   TargetLocation,
 } from "./models_0";

+export enum ParameterType {
+  SECURE_STRING = "SecureString",
+  STRING = "String",
+  STRING_LIST = "StringList",
+}
+
 /**
  * Metadata includes information like the ARN of the last user and the date/time the parameter
  * was last used.
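The hunk above only relocates `ParameterType` from `models_0.ts` to `models_1.ts`. Assuming the enum is still re-exported from the package entry point (as generated model types normally are), existing imports such as the following sketch should be unaffected; the parameter name, value, and region are placeholders.

```ts
import { SSMClient, PutParameterCommand, ParameterType } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-west-2" }); // placeholder region

// Create or overwrite a SecureString parameter using the relocated enum.
client
  .send(
    new PutParameterCommand({
      Name: "/example/db/password",      // placeholder parameter name
      Value: "not-a-real-secret",        // placeholder value
      Type: ParameterType.SECURE_STRING, // equivalent to the string literal "SecureString"
      Overwrite: true,
    })
  )
  .then((res) => console.log("parameter version:", res.Version))
  .catch(console.error);
```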
                                                                  @@ -415,29 +420,30 @@ export namespace DescribePatchGroupStateRequest { export interface DescribePatchGroupStateResult { /** - *

                                                                  The number of instances in the patch group.

                                                                  + *

                                                                  The number of managed nodes in the patch group.

                                                                  */ Instances?: number; /** - *

                                                                  The number of instances with installed patches.

                                                                  + *

                                                                  The number of managed nodes with installed patches.

                                                                  */ InstancesWithInstalledPatches?: number; /** - *

                                                                  The number of instances with patches installed that aren't defined in the patch + *

                                                                  The number of managed nodes with patches installed that aren't defined in the patch * baseline.

                                                                  */ InstancesWithInstalledOtherPatches?: number; /** - *

                                                                  The number of instances with patches installed by Patch Manager that haven't been rebooted - * after the patch installation. The status of these instances is NON_COMPLIANT.

                                                                  + *

                                                                  The number of managed nodes with patches installed by Patch Manager that haven't been + * rebooted after the patch installation. The status of these managed nodes is + * NON_COMPLIANT.

                                                                  */ InstancesWithInstalledPendingRebootPatches?: number; /** - *

                                                                  The number of instances with patches installed that are specified in a + *

                                                                  The number of managed nodes with patches installed that are specified in a * RejectedPatches list. Patches with a status of INSTALLED_REJECTED were * typically installed before they were added to a RejectedPatches list.

                                                                  * @@ -449,46 +455,47 @@ export interface DescribePatchGroupStateResult { InstancesWithInstalledRejectedPatches?: number; /** - *

                                                                  The number of instances with missing patches from the patch baseline.

                                                                  + *

                                                                  The number of managed nodes with missing patches from the patch baseline.

                                                                  */ InstancesWithMissingPatches?: number; /** - *

                                                                  The number of instances with patches from the patch baseline that failed to install.

                                                                  + *

                                                                  The number of managed nodes with patches from the patch baseline that failed to + * install.

                                                                  */ InstancesWithFailedPatches?: number; /** - *

                                                                  The number of instances with patches that aren't applicable.

                                                                  + *

                                                                  The number of managed nodes with patches that aren't applicable.

                                                                  */ InstancesWithNotApplicablePatches?: number; /** - *

                                                                  The number of instances with NotApplicable patches beyond the supported limit, - * which aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.

                                                                  + *

                                                                  The number of managed nodes with NotApplicable patches beyond the supported + * limit, which aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.

                                                                  */ InstancesWithUnreportedNotApplicablePatches?: number; /** - *

                                                                  The number of instances where patches that are specified as Critical for + *

                                                                  The number of managed nodes where patches that are specified as Critical for * compliance reporting in the patch baseline aren't installed. These patches might be missing, have - * failed installation, were rejected, or were installed but awaiting a required instance reboot. - * The status of these instances is NON_COMPLIANT.

                                                                  + * failed installation, were rejected, or were installed but awaiting a required managed node reboot. + * The status of these managed nodes is NON_COMPLIANT.

                                                                  */ InstancesWithCriticalNonCompliantPatches?: number; /** - *

                                                                  The number of instances where patches that are specified as Security in a patch - * advisory aren't installed. These patches might be missing, have failed installation, were - * rejected, or were installed but awaiting a required instance reboot. The status of these - * instances is NON_COMPLIANT.

                                                                  + *

                                                                  The number of managed nodes where patches that are specified as Security in a + * patch advisory aren't installed. These patches might be missing, have failed installation, were + * rejected, or were installed but awaiting a required managed node reboot. The status of these managed + * nodes is NON_COMPLIANT.

                                                                  */ InstancesWithSecurityNonCompliantPatches?: number; /** - *

                                                                  The number of instances with patches installed that are specified as other than + *

                                                                  The number of managed nodes with patches installed that are specified as other than * Critical or Security but aren't compliant with the patch baseline. The - * status of these instances is NON_COMPLIANT.

                                                                  + * status of these managed nodes is NON_COMPLIANT.

                                                                  */ InstancesWithOtherNonCompliantPatches?: number; } @@ -606,7 +613,7 @@ export interface SessionFilter { * 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.
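A brief, hedged sketch of how the renamed `DescribePatchGroupStateResult` counters might be read; the patch group name and region are placeholders, not values from the patch.

```ts
import { SSMClient, DescribePatchGroupStateCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-west-2" }); // placeholder region

async function summarizePatchGroup(patchGroup: string): Promise<void> {
  const state = await client.send(
    new DescribePatchGroupStateCommand({ PatchGroup: patchGroup })
  );

  // "Instances" counts managed nodes, per the terminology change recorded above.
  console.log(`${patchGroup}: ${state.Instances ?? 0} managed nodes`);
  console.log(`  missing patches:        ${state.InstancesWithMissingPatches ?? 0}`);
  console.log(`  failed patches:         ${state.InstancesWithFailedPatches ?? 0}`);
  console.log(`  critical non-compliant: ${state.InstancesWithCriticalNonCompliantPatches ?? 0}`);
  console.log(`  security non-compliant: ${state.InstancesWithSecurityNonCompliantPatches ?? 0}`);
}

summarizePatchGroup("Production").catch(console.error); // placeholder patch group name
```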

                                                                  * *
                                                                • - *

                                                                  Target: Specify an instance to which session connections have been made.

                                                                  + *

                                                                  Target: Specify a managed node to which session connections have been made.

                                                                  *
                                                                • *
                                                                • *

                                                                  Owner: Specify an Amazon Web Services user account to see a list of sessions started by that @@ -725,7 +732,7 @@ export enum SessionStatus { } /** - *

                                                                  Information about a Session Manager connection to an instance.

                                                                  + *

                                                                  Information about a Session Manager connection to a managed node.

                                                                  */ export interface Session { /** @@ -734,7 +741,7 @@ export interface Session { SessionId?: string; /** - *

                                                                  The instance that the Session Manager session connected to.

                                                                  + *

                                                                  The managed node that the Session Manager session connected to.

   */
  Target?: string;

@@ -1233,9 +1240,8 @@ export interface GetCommandInvocationRequest {
  CommandId: string | undefined;

  /**
-   * (Required) The ID of the managed instance targeted by the command. A managed instance can be
-   * an Amazon Elastic Compute Cloud (Amazon EC2) instance or an instance in your hybrid environment that is configured for
-   * Amazon Web Services Systems Manager.
+   * (Required) The ID of the managed node targeted by the command. A managed node can be an
+   * Amazon Elastic Compute Cloud (Amazon EC2) instance, edge device, and on-premises server or VM in your hybrid environment that is configured for Amazon Web Services Systems Manager.
                                                                  */ InstanceId: string | undefined; @@ -1311,8 +1317,8 @@ export interface GetCommandInvocationResult { CommandId?: string; /** - *
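For the `GetCommandInvocation` request documented above, here is a minimal polling sketch (not part of the patch). The command ID, node ID, region, and the 5-second retry cadence are arbitrary placeholders, and a production caller would also handle `InvocationDoesNotExist`, which can be thrown immediately after `SendCommand`.

```ts
import { SSMClient, GetCommandInvocationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-west-2" }); // placeholder region

// Poll one invocation until it leaves the non-terminal states described above.
async function waitForInvocation(commandId: string, instanceId: string) {
  for (;;) {
    const result = await client.send(
      new GetCommandInvocationCommand({ CommandId: commandId, InstanceId: instanceId })
    );
    if (result.Status && !["Pending", "InProgress", "Delayed"].includes(result.Status)) {
      return result; // Status/StatusDetails/ResponseCode explain the outcome
    }
    await new Promise((resolve) => setTimeout(resolve, 5_000)); // arbitrary 5 s backoff
  }
}

// Placeholder identifiers; a real command ID comes from a prior SendCommand call.
waitForInvocation("11111111-2222-3333-4444-555555555555", "i-1234567890abcdef0")
  .then((r) => console.log(r.Status, r.ResponseCode, r.StandardOutputContent))
  .catch(console.error);
```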

                                                                  The ID of the managed instance targeted by the command. A managed instance can be an EC2 - * instance or an instance in your hybrid environment that is configured for Systems Manager.

                                                                  + *

                                                                  The ID of the managed node targeted by the command. A managed node can be an + * Amazon Elastic Compute Cloud (Amazon EC2) instance, edge device, or on-premises server or VM in your hybrid environment that is configured for Amazon Web Services Systems Manager.

                                                                  */ InstanceId?: string; @@ -1339,8 +1345,8 @@ export interface GetCommandInvocationResult { /** *

                                                                  The error level response code for the plugin script. If the response code is - * -1, then the command hasn't started running on the instance, or it wasn't received - * by the instance.

                                                                  + * -1, then the command hasn't started running on the managed node, or it wasn't received + * by the node.

                                                                  */ ResponseCode?: number; @@ -1386,33 +1392,33 @@ export interface GetCommandInvocationResult { * StatusDetails can be one of the following values:

                                                                  *
                                                                    *
                                                                  • - *

                                                                    Pending: The command hasn't been sent to the instance.

                                                                    + *

                                                                    Pending: The command hasn't been sent to the managed node.

                                                                    *
                                                                  • *
                                                                  • - *

                                                                    In Progress: The command has been sent to the instance but hasn't reached a terminal + *

                                                                    In Progress: The command has been sent to the managed node but hasn't reached a terminal * state.

                                                                    *
                                                                  • *
                                                                  • *

                                                                    Delayed: The system attempted to send the command to the target, but the target wasn't - * available. The instance might not be available because of network issues, because the instance + * available. The managed node might not be available because of network issues, because the node * was stopped, or for similar reasons. The system will try to send the command again.

                                                                    *
                                                                  • *
                                                                  • *

                                                                    Success: The command or plugin ran successfully. This is a terminal state.

                                                                    *
                                                                  • *
                                                                  • - *

                                                                    Delivery Timed Out: The command wasn't delivered to the instance before the delivery + *

                                                                    Delivery Timed Out: The command wasn't delivered to the managed node before the delivery * timeout expired. Delivery timeouts don't count against the parent command's * MaxErrors limit, but they do contribute to whether the parent command status is * Success or Incomplete. This is a terminal state.

                                                                    *
                                                                  • *
                                                                  • - *

                                                                    Execution Timed Out: The command started to run on the instance, but the execution wasn't + *

                                                                    Execution Timed Out: The command started to run on the managed node, but the execution wasn't * complete before the timeout expired. Execution timeouts count against the * MaxErrors limit of the parent command. This is a terminal state.

                                                                    *
                                                                  • *
                                                                  • - *

                                                                    Failed: The command wasn't run successfully on the instance. For a plugin, this indicates + *

                                                                    Failed: The command wasn't run successfully on the managed node. For a plugin, this indicates * that the result code wasn't zero. For a command invocation, this indicates that the result code * for one or more plugins wasn't zero. Invocation failures count against the * MaxErrors limit of the parent command. This is a terminal state.

                                                                    @@ -1422,7 +1428,7 @@ export interface GetCommandInvocationResult { * state.

                                                                    *
                                                                  • *
                                                                  • - *

                                                                    Undeliverable: The command can't be delivered to the instance. The instance might not + *

                                                                    Undeliverable: The command can't be delivered to the managed node. The node might not * exist or might not be responding. Undeliverable invocations don't count against the parent * command's MaxErrors limit and don't contribute to whether the parent command * status is Success or Incomplete. This is a terminal state.

                                                                    @@ -1493,8 +1499,8 @@ export namespace InvalidPluginName { } /** - *

                                                                    The command ID and instance ID you specified didn't match any invocations. Verify the - * command ID and the instance ID and try again.

                                                                    + *

                                                                    The command ID and managed node ID you specified didn't match any invocations. Verify the + * command ID and the managed node ID and try again.

                                                                    */ export interface InvocationDoesNotExist extends __SmithyException, $MetadataBearer { name: "InvocationDoesNotExist"; @@ -1512,7 +1518,7 @@ export namespace InvocationDoesNotExist { export interface GetConnectionStatusRequest { /** - *

                                                                    The instance ID.

                                                                    + *

                                                                    The managed node ID.

                                                                    */ Target: string | undefined; } @@ -1533,12 +1539,12 @@ export enum ConnectionStatus { export interface GetConnectionStatusResponse { /** - *

                                                                    The ID of the instance to check connection status.

                                                                    + *

                                                                    The ID of the managed node to check connection status.

                                                                    */ Target?: string; /** - *

                                                                    The status of the connection to the instance. For example, 'Connected' or 'Not + *

                                                                    The status of the connection to the managed node. For example, 'Connected' or 'Not * Connected'.

                                                                    */ Status?: ConnectionStatus | string; @@ -1640,14 +1646,14 @@ export interface BaselineOverride { /** *
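For the `GetConnectionStatus` shapes above, a small sketch that checks whether a managed node is reachable for Session Manager; the target ID and region are placeholders, and the lowercase status comparison is an assumption about how the service reports the value.

```ts
import { SSMClient, GetConnectionStatusCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-west-2" }); // placeholder region

async function isConnected(target: string): Promise<boolean> {
  const { Status } = await client.send(new GetConnectionStatusCommand({ Target: target }));
  // The doc text above describes the status as 'Connected' or 'Not Connected';
  // compare case-insensitively to avoid depending on exact casing.
  return (Status ?? "").toLowerCase() === "connected";
}

// Placeholder managed node ID (an EC2 instance ID or an "mi-..." hybrid node ID).
isConnected("i-1234567890abcdef0")
  .then((ok) => console.log(ok ? "Connected" : "Not connected"))
  .catch(console.error);
```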

                                                                    Indicates whether the list of approved patches includes non-security updates that should be - * applied to the instances. The default value is false. Applies to Linux instances - * only.

                                                                    + * applied to the managed nodes. The default value is false. Applies to Linux managed + * nodes only.

                                                                    */ ApprovedPatchesEnableNonSecurity?: boolean; /** - *

                                                                    Information about the patches to use to update the instances, including target operating - * systems and source repositories. Applies to Linux instances only.

                                                                    + *

                                                                    Information about the patches to use to update the managed nodes, including target operating + * systems and source repositories. Applies to Linux managed nodes only.

                                                                    */ Sources?: PatchSource[]; } @@ -1664,7 +1670,7 @@ export namespace BaselineOverride { export interface GetDeployablePatchSnapshotForInstanceRequest { /** - *

                                                                    The ID of the instance for which the appropriate patch snapshot should be retrieved.

                                                                    + *

                                                                    The ID of the managed node for which the appropriate patch snapshot should be retrieved.

                                                                    */ InstanceId: string | undefined; @@ -1690,7 +1696,7 @@ export namespace GetDeployablePatchSnapshotForInstanceRequest { export interface GetDeployablePatchSnapshotForInstanceResult { /** - *

                                                                    The instance ID.

                                                                    + *

                                                                    The managed node ID.

                                                                    */ InstanceId?: string; @@ -1707,7 +1713,7 @@ export interface GetDeployablePatchSnapshotForInstanceResult { /** *

                                                                    Returns the specific operating system (for example Windows Server 2012 or Amazon Linux - * 2015.09) on the instance for the specified patch snapshot.

                                                                    + * 2015.09) on the managed node for the specified patch snapshot.

                                                                    */ Product?: string; } @@ -1926,7 +1932,7 @@ export interface InventoryFilter { Key: string | undefined; /** - *

                                                                    Inventory filter values. Example: inventory filter where instance IDs are specified as + *

                                                                    Inventory filter values. Example: inventory filter where managed node IDs are specified as * values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g, * i-1a2b3c4d5e6,Type=Equal.

                                                                    */ @@ -2046,8 +2052,8 @@ export namespace InventoryResultItem { */ export interface InventoryResultEntity { /** - *

                                                                    ID of the inventory result entity. For example, for managed instance inventory the result - * will be the managed instance ID. For EC2 instance inventory, the result will be the instance ID. + *

                                                                    ID of the inventory result entity. For example, for managed node inventory the result + * will be the managed node ID. For EC2 instance inventory, the result will be the instance ID. *

                                                                    */ Id?: string; @@ -2069,7 +2075,7 @@ export namespace InventoryResultEntity { export interface GetInventoryResult { /** - *

                                                                    Collection of inventory entities such as a collection of instance inventory.

                                                                    + *

                                                                    Collection of inventory entities such as a collection of managed node inventory.

                                                                    */ Entities?: InventoryResultEntity[]; @@ -2819,8 +2825,8 @@ export interface NotificationConfig { *
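Mirroring the `InventoryFilter` example quoted in the doc text above, here is a hedged sketch of querying inventory for specific managed node IDs; the region is a placeholder and the IDs are the sample values from the documentation.

```ts
import { SSMClient, GetInventoryCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-west-2" }); // placeholder region

// Filter AWS:InstanceInformation inventory to two sample managed node IDs.
client
  .send(
    new GetInventoryCommand({
      Filters: [
        {
          Key: "AWS:InstanceInformation.InstanceId",
          Values: ["i-a12b3c4d5e6g", "i-1a2b3c4d5e6"],
          Type: "Equal",
        },
      ],
    })
  )
  .then(({ Entities }) => {
    for (const entity of Entities ?? []) {
      // Entity Id is the managed node ID; Data holds the inventory result items.
      console.log(entity.Id, Object.keys(entity.Data ?? {}));
    }
  })
  .catch(console.error);
```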
                                                                  • *
                                                                  • *

                                                                    - * Invocation: For commands sent to multiple instances, receive notification on - * a per-instance basis when the status of a command changes.

                                                                    + * Invocation: For commands sent to multiple managed nodes, receive notification + * on a per-node basis when the status of a command changes.

                                                                    *
                                                                  • *
                                                                  */ @@ -2896,7 +2902,7 @@ export interface MaintenanceWindowRunCommandParameters { DocumentVersion?: string; /** - *

                                                                  Configurations for sending notifications about command status changes on a per-instance + *

                                                                  Configurations for sending notifications about command status changes on a per-managed node * basis.

                                                                  */ NotificationConfig?: NotificationConfig; @@ -4002,8 +4008,8 @@ export interface GetPatchBaselineResult { /** *

                                                                  Indicates whether the list of approved patches includes non-security updates that should be - * applied to the instances. The default value is false. Applies to Linux instances - * only.

                                                                  + * applied to the managed nodes. The default value is false. Applies to Linux managed + * nodes only.

                                                                  */ ApprovedPatchesEnableNonSecurity?: boolean; @@ -4040,8 +4046,8 @@ export interface GetPatchBaselineResult { Description?: string; /** - *

                                                                  Information about the patches to use to update the instances, including target operating - * systems and source repositories. Applies to Linux instances only.

                                                                  + *

                                                                  Information about the patches to use to update the managed nodes, including target operating + * systems and source repositories. Applies to Linux managed nodes only.

                                                                  */ Sources?: PatchSource[]; } @@ -4377,7 +4383,7 @@ export interface ListAssociationsRequest { * *

                                                                  Filtering associations using the InstanceID attribute only returns legacy * associations created using the InstanceID attribute. Associations targeting the - * instance that are part of the Target Attributes ResourceGroup or Tags + * managed node that are part of the Target Attributes ResourceGroup or Tags * aren't returned.

                                                                  *
                                                                  */ @@ -4406,7 +4412,7 @@ export namespace ListAssociationsRequest { } /** - *

                                                                  Describes an association of a Amazon Web Services Systems Manager document (SSM document) and an instance.

                                                                  + *

                                                                  Describes an association of a Amazon Web Services Systems Manager document (SSM document) and a managed node.

                                                                  */ export interface Association { /** @@ -4415,7 +4421,7 @@ export interface Association { Name?: string; /** - *

                                                                  The instance ID.

                                                                  + *

                                                                  The managed node ID.

                                                                  */ InstanceId?: string; @@ -4436,9 +4442,9 @@ export interface Association { DocumentVersion?: string; /** - *

                                                                  The instances targeted by the request to create an association. You can target all instances - * in an Amazon Web Services account by specifying the InstanceIds key with a value of - * *.

                                                                  + *

                                                                  The managed nodes targeted by the request to create an association. You can target all + * managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of + * *.

 */
 Targets?: Target[];

@@ -4586,8 +4592,8 @@ export interface AssociationVersionInfo {
 * example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50
- * instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.
+ * managed nodes and set MaxError to 10%, then the system stops sending the request when the sixth error is received.
 *
 * Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1

@@ -4599,9 +4605,9 @@ export interface AssociationVersionInfo {
 *
 * The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.
- * If a new instance starts and attempts to run an association while Systems Manager is running
+ * If a new managed node starts and attempts to run an association while Systems Manager is running
 * MaxConcurrency associations, the association is allowed to run. During the next
- * association interval, the new instance will process its association within the limit specified
+ * association interval, the new managed node will process its association within the limit specified
 * for MaxConcurrency.
 */
 MaxConcurrency?: string;

@@ -4694,8 +4700,8 @@ export enum CommandFilterKey {
 /**
 * Describes a command filter.
 *
- * An instance ID can't be specified when a command status is Pending because the command hasn't run on the instance yet.
+ * A managed node ID can't be specified when a command status is Pending because the command hasn't run on the node yet.
 *
 */
 export interface CommandFilter {

@@ -4862,7 +4868,7 @@ export interface CommandFilter {
 * DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to
- * perform security patching operations on instances.
+ * perform security patching operations on managed nodes.
 *
@@ -4902,7 +4908,7 @@ export interface ListCommandInvocationsRequest {
 CommandId?: string;
 /**
- * (Optional) The command execution details for a specific instance ID.
+ * (Optional) The command execution details for a specific managed node ID.
 */
 InstanceId?: string;

@@ -4975,10 +4981,10 @@ export interface CommandPlugin {
 * following values:
 *
- * Pending: The command hasn't been sent to the instance.
+ * Pending: The command hasn't been sent to the managed node.
 *
- * In Progress: The command has been sent to the instance but hasn't reached a terminal
+ * In Progress: The command has been sent to the managed node but hasn't reached a terminal
 * state.
 *
@@ -4986,18 +4992,18 @@ export interface CommandPlugin {
 * terminal state.
 *
- * Delivery Timed Out: The command wasn't delivered to the instance before the delivery
+ * Delivery Timed Out: The command wasn't delivered to the managed node before the delivery
 * timeout expired. Delivery timeouts don't count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.
 *
- * Execution Timed Out: Command execution started on the instance, but the execution wasn't
+ * Execution Timed Out: Command execution started on the managed node, but the execution wasn't
 * complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.
 *
- * Failed: The command wasn't successful on the instance. For a plugin, this indicates that
+ * Failed: The command wasn't successful on the managed node. For a plugin, this indicates that
 * the result code wasn't zero. For a command invocation, this indicates that the result code for one or more plugins wasn't zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

@@ -5007,7 +5013,7 @@ export interface CommandPlugin {
 * state.
 *
- * Undeliverable: The command can't be delivered to the instance. The instance might not
+ * Undeliverable: The command can't be delivered to the managed node. The managed node might not
 * exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

@@ -5070,7 +5076,7 @@ export interface CommandPlugin {
 * ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;
 *
- * i-02573cafcfEXAMPLE is the instance ID;
+ * i-02573cafcfEXAMPLE is the managed node ID;
 *
 * awsrunShellScript is the name of the plugin.
 */

@@ -5088,7 +5094,7 @@ export interface CommandPlugin {
 * ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix;
 *
- * i-02573cafcfEXAMPLE is the instance ID;
+ * i-02573cafcfEXAMPLE is the managed node ID;
 *
 * awsrunShellScript is the name of the plugin.
 */

@@ -5105,11 +5111,11 @@ export namespace CommandPlugin {
 }
 /**
- * An invocation is copy of a command sent to a specific instance. A command can apply to one or more instances. A command invocation applies to one instance. For example, if a user runs SendCommand against three instances, then a command invocation is created for each requested instance ID. A command invocation returns status and detail information about a command you ran.
+ * An invocation is a copy of a command sent to a specific managed node. A command can apply to one or more managed nodes. A command invocation applies to one managed node. For example, if a user runs SendCommand against three managed nodes, then a command invocation is created for each requested managed node ID. A command invocation returns status and detail information about a command you ran.
 */
 export interface CommandInvocation {
 /**

@@ -5118,12 +5124,12 @@ export interface CommandInvocation {
 CommandId?: string;
 /**
- * The instance ID in which this invocation was requested.
+ * The managed node ID in which this invocation was requested.
 */
 InstanceId?: string;
 /**
- * The fully qualified host name of the managed instance.
+ * The fully qualified host name of the managed node.
 */
 InstanceName?: string;

@@ -5144,7 +5150,7 @@ export interface CommandInvocation {
 DocumentVersion?: string;
 /**
- * The time and date the request was sent to this instance.
+ * The time and date the request was sent to this managed node.
 */
 RequestedDateTime?: Date;

@@ -5154,7 +5160,7 @@ export interface CommandInvocation {
 Status?: CommandInvocationStatus | string;
 /**
- * A detailed status of the command execution for each invocation (each instance targeted by
+ * A detailed status of the command execution for each invocation (each managed node targeted by
 * the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command
@@ -5162,10 +5168,10 @@ export interface CommandInvocation {
 * following values:
 *
- * Pending: The command hasn't been sent to the instance.
+ * Pending: The command hasn't been sent to the managed node.
 *
- * In Progress: The command has been sent to the instance but hasn't reached a terminal
+ * In Progress: The command has been sent to the managed node but hasn't reached a terminal
 * state.
 *
@@ -5173,18 +5179,18 @@ export interface CommandInvocation {
 * terminal state.
 *
- * Delivery Timed Out: The command wasn't delivered to the instance before the delivery
+ * Delivery Timed Out: The command wasn't delivered to the managed node before the delivery
 * timeout expired. Delivery timeouts don't count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.
 *
- * Execution Timed Out: Command execution started on the instance, but the execution wasn't
+ * Execution Timed Out: Command execution started on the managed node, but the execution wasn't
 * complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.
 *
- * Failed: The command wasn't successful on the instance. For a plugin, this indicates that
+ * Failed: The command wasn't successful on the managed node. For a plugin, this indicates that
 * the result code wasn't zero. For a command invocation, this indicates that the result code for one or more plugins wasn't zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.

@@ -5194,7 +5200,7 @@ export interface CommandInvocation {
 * state.
 *
- * Undeliverable: The command can't be delivered to the instance. The instance might not
+ * Undeliverable: The command can't be delivered to the managed node. The managed node might not
 * exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.

@@ -5236,12 +5242,12 @@ export interface CommandInvocation {
 /**
 * The Identity and Access Management (IAM) service role that Run Command, a capability of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes
- * on a per instance basis.
+ * on a per managed node basis.
 */
 ServiceRole?: string;
 /**
- * Configurations for sending notifications about command status changes on a per instance
+ * Configurations for sending notifications about command status changes on a per managed node
 * basis.
 */
 NotificationConfig?: NotificationConfig;
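As a point of reference for the CommandInvocation fields documented above, here is a minimal sketch of retrieving per-node invocation details with the generated client; the Region, command ID, and managed node ID below are placeholder values.

import { SSMClient, ListCommandInvocationsCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" }); // placeholder Region

async function showInvocations(): Promise<void> {
  // One CommandInvocation is returned per managed node targeted by the command.
  const { CommandInvocations } = await client.send(
    new ListCommandInvocationsCommand({
      CommandId: "0831e1a8-0000-0000-0000-0000EXAMPLE", // placeholder command ID
      InstanceId: "i-02573cafcfEXAMPLE",                // managed node ID, as in the docs above
      Details: true,                                    // include per-plugin output
    })
  );
  for (const invocation of CommandInvocations ?? []) {
    console.log(invocation.InstanceId, invocation.Status, invocation.StatusDetails);
  }
}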

@@ -5291,10 +5297,10 @@ export interface ListCommandsRequest {
 CommandId?: string;
 /**
- * (Optional) Lists commands issued against this instance ID.
+ * (Optional) Lists commands issued against this managed node ID.
 *
- * You can't specify an instance ID in the same command that you specify Status = Pending. This is because the command hasn't reached the instance yet.
+ * You can't specify a managed node ID in the same command that you specify Status = Pending. This is because the command hasn't reached the managed node yet.
 *
 */
 InstanceId?: string;

@@ -5375,13 +5381,14 @@ export interface Command {
 Parameters?: { [key: string]: string[] };
 /**
- * The instance IDs against which this command was requested.
+ * The managed node IDs against which this command was requested.
 */
 InstanceIds?: string[];
 /**
- * An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.
+ * An array of search criteria that targets managed nodes using a Key,Value combination that you specify. Targets is required if you don't provide one or more managed node IDs in the call.
 */
 Targets?: Target[];

@@ -5404,11 +5411,11 @@ export interface Command {
 * following values:
 *
- * Pending: The command hasn't been sent to any instances.
+ * Pending: The command hasn't been sent to any managed nodes.
 *
- * In Progress: The command has been sent to at least one instance but hasn't reached a final state on all instances.
+ * In Progress: The command has been sent to at least one managed node but hasn't reached a final state on all managed nodes.
 *
 * Success: The command successfully ran on all invocations. This is a terminal state.
@@ -5426,18 +5433,18 @@ export interface Command {
 * is a terminal state.
 *
- * Incomplete: The command was attempted on all instances and one or more invocations doesn't have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.
+ * Incomplete: The command was attempted on all managed nodes and one or more invocations doesn't have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.
 *
 * Canceled: The command was terminated before it was completed. This is a terminal state.
 *
- * Rate Exceeded: The number of instances targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before running it on any instance. This is a terminal state.
+ * Rate Exceeded: The number of managed nodes targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before running it on any managed node. This is a terminal state.
 *
 */

@@ -5462,8 +5469,8 @@ export interface Command {
 OutputS3KeyPrefix?: string;
 /**
- * The maximum number of instances that are allowed to run the command at the same time. You can specify a number of instances, such as 10, or a percentage of instances, such as 10%. The
+ * The maximum number of managed nodes that are allowed to run the command at the same time. You can specify a number of managed nodes, such as 10, or a percentage of nodes, such as 10%. The
 * default value is 50. For more information about how to use MaxConcurrency, see Running commands using Systems Manager Run Command in the Amazon Web Services Systems Manager User Guide.

@@ -5702,7 +5709,7 @@ export interface ComplianceItem {
 ResourceType?: string;
 /**
- * An ID for the resource. For a managed instance, this is the instance ID.
+ * An ID for the resource. For a managed node, this is the node ID.
 */
 ResourceId?: string;

@@ -5804,7 +5811,7 @@ export namespace ListComplianceSummariesRequest {
 }
 /**
- * The number of managed instances found for each patch severity level defined in the request
+ * The number of managed nodes found for each patch severity level defined in the request
 * filter.
 */
 export interface SeveritySummary {

@@ -6594,7 +6601,7 @@ export namespace ListDocumentVersionsResult {
 export interface ListInventoryEntriesRequest {
 /**
- * The instance ID for which you want inventory information.
+ * The managed node ID for which you want inventory information.
 */
 InstanceId: string | undefined;

@@ -6637,22 +6644,22 @@ export interface ListInventoryEntriesResult {
 TypeName?: string;
 /**
- * The instance ID targeted by the request to query inventory information.
+ * The managed node ID targeted by the request to query inventory information.
 */
 InstanceId?: string;
 /**
- * The inventory schema version used by the instance(s).
+ * The inventory schema version used by the managed node(s).
 */
 SchemaVersion?: string;
 /**
- * The time that inventory information was collected for the instance(s).
+ * The time that inventory information was collected for the managed node(s).
 */
 CaptureTime?: string;
 /**
- * A list of inventory items on the instance(s).
+ * A list of inventory items on the managed node(s).
 */
 Entries?: { [key: string]: string }[];

@@ -7182,9 +7189,9 @@ export namespace ResourceComplianceSummaryItem {
 export interface ListResourceComplianceSummariesResult {
 /**
- * A summary count for specified or targeted managed instances. Summary count includes information about compliant and non-compliant State Manager associations, patch status, or custom items according to the filter criteria that you specify.
+ * A summary count for specified or targeted managed nodes. Summary count includes information about compliant and non-compliant State Manager associations, patch status, or custom items according to the filter criteria that you specify.
 */
 ResourceComplianceSummaryItems?: ResourceComplianceSummaryItem[];

@@ -7621,7 +7628,7 @@ export enum ComplianceUploadType {
 export interface PutComplianceItemsRequest {
 /**
- * Specify an ID for this resource. For a managed instance, this is the instance ID.
+ * Specify an ID for this resource. For a managed node, this is the node ID.
 */
 ResourceId: string | undefined;

@@ -7769,7 +7776,7 @@ export namespace ItemContentMismatchException {
 }
 /**
- * Information collected from managed instances based on your inventory policy document
+ * Information collected from managed nodes based on your inventory policy document
 */
 export interface InventoryItem {
 /**

@@ -7822,12 +7829,12 @@ export namespace InventoryItem {
 export interface PutInventoryRequest {
 /**
- * An instance ID where you want to add or update inventory items.
+ * An managed node ID where you want to add or update inventory items.
 */
 InstanceId: string | undefined;
 /**
- * The inventory items that you want to add or update on instances.
+ * The inventory items that you want to add or update on managed nodes.
 */
 Items: InventoryItem[] | undefined;
 }

@@ -8541,53 +8548,48 @@ export interface RegisterTargetWithMaintenanceWindowRequest {
 ResourceType: MaintenanceWindowResourceType | string | undefined;
 /**
- * The targets to register with the maintenance window. In other words, the instances to run commands on when the maintenance window runs.
+ * The targets to register with the maintenance window. In other words, the managed nodes to run commands on when the maintenance window runs.
 *
 * If a single maintenance window task is registered with multiple targets, its task invocations occur sequentially and not in parallel. If your task must run on multiple targets at the same time, register a task for each target individually and assign each task the same priority level.
 *
- * You can specify targets using instance IDs, resource group names, or tags that have been applied to instances.
+ * You can specify targets using managed node IDs, resource group names, or tags that have been applied to managed nodes.
 *
- * Example 1: Specify instance IDs
+ * Example 1: Specify managed node IDs
 *
- * Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3
+ * Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>,<instance-id-3>
 *
- * Example 2: Use tag key-pairs applied to instances
+ * Example 2: Use tag key-pairs applied to managed nodes
 *
- * Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2
+ * Key=tag:<my-tag-key>,Values=<my-tag-value-1>,<my-tag-value-2>
 *
- * Example 3: Use tag-keys applied to instances
+ * Example 3: Use tag-keys applied to managed nodes
 *
- * Key=tag-key,Values=my-tag-key-1,my-tag-key-2
+ * Key=tag-key,Values=<my-tag-key-1>,<my-tag-key-2>
 *
 * Example 4: Use resource group names
 *
- * Key=resource-groups:Name,Values=resource-group-name
+ * Key=resource-groups:Name,Values=<resource-group-name>
 *
 * Example 5: Use filters for resource group types
 *
- * Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2
+ * Key=resource-groups:ResourceTypeFilters,Values=<resource-type-1>,<resource-type-2>
 *
 * For Key=resource-groups:ResourceTypeFilters, specify resource types in the following format
 *
- * Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC
+ * Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC
 *
 *
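The target formats listed above map directly onto the Targets parameter of the generated RegisterTargetWithMaintenanceWindowCommand. A small sketch using the tag key-pair form (Example 2); the Region, window ID, tag key, and tag value are placeholders.

import { SSMClient, RegisterTargetWithMaintenanceWindowCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" }); // placeholder Region

async function registerTaggedNodes(): Promise<string | undefined> {
  const { WindowTargetId } = await client.send(
    new RegisterTargetWithMaintenanceWindowCommand({
      WindowId: "mw-0c50858d01EXAMPLE", // placeholder maintenance window ID
      ResourceType: "INSTANCE",         // managed nodes, as opposed to RESOURCE_GROUP
      Targets: [{ Key: "tag:my-tag-key", Values: ["my-tag-value-1"] }],
    })
  );
  return WindowTargetId;
}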

@@ -8672,7 +8674,7 @@ export interface RegisterTaskWithMaintenanceWindowRequest {
 WindowId: string | undefined;
 /**
- * The targets (either instances or maintenance window targets).
+ * The targets (either managed nodes or maintenance window targets).
 *
 * One or more targets must be specified for maintenance window Run Command-type tasks. Depending on the task, targets are optional for other maintenance window task types (Automation,
@@ -8681,7 +8683,7 @@ export interface RegisterTaskWithMaintenanceWindowRequest {
 * maintenance window tasks without targets in the Amazon Web Services Systems Manager User Guide.
 *
- * Specify instances using the following format:
+ * Specify managed nodes using the following format:
 *
 * Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>
 *

@@ -8772,7 +8774,7 @@ export interface RegisterTaskWithMaintenanceWindowRequest {
 /**
 * A structure containing information about an Amazon Simple Storage Service (Amazon S3) bucket
- * to write instance-level logs to.
+ * to write managed node-level logs to.
 *
 * LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the

@@ -8866,7 +8868,7 @@ export interface RemoveTagsFromResourceRequest {
 * The type of resource from which you want to remove a tag.
 *
 * The ManagedInstance type for this API operation is only for on-premises
- * managed instances. Specify the name of the managed instance in the following format:
+ * managed nodes. Specify the name of the managed node in the following format:
 * mi-ID_number. For example, mi-1a2b3c4d5e6f.

@@ -8887,9 +8889,9 @@ export interface RemoveTagsFromResourceRequest {
 * /aws/ssm/MyGroup/appmanager.
 *
 * For the Document and Parameter values, use the name of the resource.
 *
- * The ManagedInstance type for this API operation is only for on-premises managed instances. Specify the name of the managed instance in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.
+ * The ManagedInstance type for this API operation is only for on-premises managed nodes. Specify the name of the managed node in the following format: mi-ID_number. For example, mi-1a2b3c4d5e6f.
 *
 */
 ResourceId: string | undefined;

@@ -9016,13 +9018,13 @@ export interface ResumeSessionResponse {
 /**
 * An encrypted token value containing session and caller information. Used to authenticate the
- * connection to the instance.
+ * connection to the managed node.
 */
 TokenValue?: string;
 /**
- * A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).
+ * A URL back to SSM Agent on the managed node that the Session Manager client uses to send commands and receive output from the managed node. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).
 *
 * region represents the Region identifier for an Amazon Web Services Region supported by Amazon Web Services Systems Manager, such as us-east-2 for the US East (Ohio) Region.

@@ -9201,13 +9203,13 @@ export namespace InvalidRole {
 export interface SendCommandRequest {
 /**
- * The IDs of the instances where the command should run. Specifying instance IDs is most useful when you are targeting a limited number of instances, though you can specify up to 50
+ * The IDs of the managed nodes where the command should run. Specifying managed node IDs is most useful when you are targeting a limited number of managed nodes, though you can specify up to 50
 * IDs.
- * To target a larger number of instances, or if you prefer not to list individual instance
+ * To target a larger number of managed nodes, or if you prefer not to list individual node
 * IDs, we recommend using the Targets option instead. Using Targets,
- * which accepts tag key-value pairs to identify the instances to send commands to, you can a send command to tens, hundreds, or thousands of instances at once.
+ * which accepts tag key-value pairs to identify the managed nodes to send commands to, you can a send command to tens, hundreds, or thousands of nodes at once.
 *
 * For more information about how to use targets, see Using targets and rate controls to send commands to a fleet in the Amazon Web Services Systems Manager User Guide.

@@ -9215,13 +9217,13 @@ export interface SendCommandRequest {
 InstanceIds?: string[];
 /**
- * An array of search criteria that targets instances using a Key,Value
+ * An array of search criteria that targets managed nodes using a Key,Value
 * combination that you specify. Specifying targets is most useful when you want to send a command
- * to a large number of instances at once. Using Targets, which accepts tag key-value pairs to identify instances, you can send a command to tens, hundreds, or thousands of instances at once.
- * To send a command to a smaller number of instances, you can use the InstanceIds option instead.
+ * to a large number of managed nodes at once. Using Targets, which accepts tag key-value pairs to identify managed nodes, you can send a command to tens, hundreds, or thousands of nodes at once.
+ * To send a command to a smaller number of managed nodes, you can use the InstanceIds option instead.
 *
 * For more information about how to use targets, see Sending commands to a fleet in the Amazon Web Services Systems Manager User Guide.
 */

@@ -9299,8 +9301,8 @@ export interface SendCommandRequest {
 OutputS3KeyPrefix?: string;
 /**
- * (Optional) The maximum number of instances that are allowed to run the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is
+ * (Optional) The maximum number of managed nodes that are allowed to run the command at the same time. You can specify a number such as 10 or a percentage such as 10%. The default value is
 * 50. For more information about how to use MaxConcurrency, see Using concurrency controls in the Amazon Web Services Systems Manager User Guide.
 */
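The InstanceIds, Targets, and MaxConcurrency fields described above are used together when calling SendCommand. A hedged sketch with the generated SendCommandCommand; the document name is an AWS-managed document, while the Region, tag key/value, and concurrency settings are illustrative placeholders.

import { SSMClient, SendCommandCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" }); // placeholder Region

async function patchTaggedFleet(): Promise<string | undefined> {
  // Target by tag instead of listing individual managed node IDs.
  const { Command } = await client.send(
    new SendCommandCommand({
      DocumentName: "AWS-RunPatchBaseline",
      Targets: [{ Key: "tag:PatchGroup", Values: ["web-servers"] }], // placeholder tag
      Parameters: { Operation: ["Scan"] },
      MaxConcurrency: "10%", // run on at most 10% of targeted nodes at a time
      MaxErrors: "1",        // stop sending after the first error
    })
  );
  return Command?.CommandId;
}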

@@ -9753,14 +9755,14 @@ export namespace StartChangeRequestExecutionResult {
 export interface StartSessionRequest {
 /**
- * The instance to connect to for the session.
+ * The managed node to connect to for the session.
 */
 Target: string | undefined;
 /**
 * The name of the SSM document to define the parameters and plugin settings for the session. For example, SSM-SessionManagerRunShell. You can call the GetDocument API to verify the document exists before attempting to start a session.
- * If no document name is provided, a shell to the instance is launched by default.
+ * If no document name is provided, a shell to the managed node is launched by default.
 */
 DocumentName?: string;

@@ -9793,13 +9795,13 @@ export interface StartSessionResponse {
 /**
 * An encrypted token value containing session and caller information. Used to authenticate the
- * connection to the instance.
+ * connection to the managed node.
 */
 TokenValue?: string;
 /**
- * A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)
+ * A URL back to SSM Agent on the managed node that the Session Manager client uses to send commands and receive output from the node. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)
 *
 * region represents the Region identifier for an

@@ -9823,10 +9825,10 @@ export namespace StartSessionResponse {
 }
 /**
- * The specified target instance for the session isn't fully configured for use with Session Manager. For
+ * The specified target managed node for the session isn't fully configured for use with Session Manager. For
 * more information, see Getting started with Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you
- * attempt to start a session on an instance that is located in a different account or Region
+ * attempt to start a session on a managed node that is located in a different account or Region
 */
 export interface TargetNotConnected extends __SmithyException, $MetadataBearer {
   name: "TargetNotConnected";
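For the session fields documented above, a minimal sketch of opening a session against a managed node with the generated StartSessionCommand; the Region and target ID are placeholders, and interactive use normally goes through the Session Manager plugin rather than these raw values.

import { SSMClient, StartSessionCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" }); // placeholder Region

async function openSession(): Promise<void> {
  const { SessionId, StreamUrl, TokenValue } = await client.send(
    new StartSessionCommand({
      Target: "i-02573cafcfEXAMPLE", // managed node ID (placeholder)
      // DocumentName omitted: a shell to the managed node is launched by default.
    })
  );
  // StreamUrl and TokenValue are what a Session Manager client uses to open the
  // WebSocket data channel described in StartSessionResponse above.
  console.log(SessionId, StreamUrl, TokenValue ? "token received" : "no token");
}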

                                                                      The name of the SSM Command document or Automation runbook that contains the configuration - * information for the instance.

                                                                      + * information for the managed node.

                                                                      *

                                                                      You can specify Amazon Web Services-predefined documents, documents you created, or a document that is * shared with you from another account.

                                                                      *

                                                                      For Systems Manager document (SSM document) that are shared with you from other Amazon Web Services accounts, you @@ -10095,8 +10097,8 @@ export interface UpdateAssociationRequest { * example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, * the system stops sending requests when the fourth error is received. If you specify 0, then the * system stops sending requests after the first error is returned. If you run an association on 50 - * instances and set MaxError to 10%, then the system stops sending the request when - * the sixth error is received.

                                                                      + * managed nodes and set MaxError to 10%, then the system stops sending the request + * when the sixth error is received.

                                                                      *

                                                                      Executions that are already running an association when MaxErrors is reached * are allowed to complete, but some of these executions may fail as well. If you need to ensure * that there won't be more than max-errors failed executions, set MaxConcurrency to 1 @@ -10108,9 +10110,9 @@ export interface UpdateAssociationRequest { *

                                                                      The maximum number of targets allowed to run the association at the same time. You can * specify a number, for example 10, or a percentage of the target set, for example 10%. The default * value is 100%, which means all targets run the association at the same time.

                                                                      - *

                                                                      If a new instance starts and attempts to run an association while Systems Manager is running + *

                                                                      If a new managed node starts and attempts to run an association while Systems Manager is running * MaxConcurrency associations, the association is allowed to run. During the next - * association interval, the new instance will process its association within the limit specified + * association interval, the new managed node will process its association within the limit specified * for MaxConcurrency.

                                                                      */ MaxConcurrency?: string; @@ -10215,7 +10217,7 @@ export interface UpdateAssociationStatusRequest { Name: string | undefined; /** - *

                                                                      The instance ID.

                                                                      + *

                                                                      The managed node ID.

                                                                      */ InstanceId: string | undefined; @@ -10506,14 +10508,3 @@ export namespace UpdateDocumentMetadataRequest { ...obj, }); } - -export interface UpdateDocumentMetadataResponse {} - -export namespace UpdateDocumentMetadataResponse { - /** - * @internal - */ - export const filterSensitiveLog = (obj: UpdateDocumentMetadataResponse): any => ({ - ...obj, - }); -} diff --git a/clients/client-ssm/src/models/models_2.ts b/clients/client-ssm/src/models/models_2.ts index 1f803a91ba17..b0f930fe40d4 100644 --- a/clients/client-ssm/src/models/models_2.ts +++ b/clients/client-ssm/src/models/models_2.ts @@ -28,6 +28,17 @@ import { ResultAttribute, } from "./models_1"; +export interface UpdateDocumentMetadataResponse {} + +export namespace UpdateDocumentMetadataResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateDocumentMetadataResponse): any => ({ + ...obj, + }); +} + export interface UpdateMaintenanceWindowRequest { /** *

                                                                      The ID of the maintenance window to update.

                                                                      @@ -309,9 +320,9 @@ export interface UpdateMaintenanceWindowTaskRequest { WindowTaskId: string | undefined; /** - *

                                                                      The targets (either instances or tags) to modify. Instances are specified using the format - * Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using the - * format Key=tag_name,Values=tag_value.

                                                                      + *

                                                                      The targets (either managed nodes or tags) to modify. Managed nodes are specified using the + * format Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using + * the format Key=tag_name,Values=tag_value.

                                                                      * *

                                                                      One or more targets must be specified for maintenance window Run Command-type tasks. * Depending on the task, targets are optional for other maintenance window task types (Automation, @@ -593,7 +604,7 @@ export namespace UpdateMaintenanceWindowTaskResult { export interface UpdateManagedInstanceRoleRequest { /** - *

                                                                      The ID of the managed instance where you want to update the role.

                                                                      + *

                                                                      The ID of the managed node where you want to update the role.

                                                                      */ InstanceId: string | undefined; @@ -845,8 +856,8 @@ export interface UpdatePatchBaselineRequest { /** *

                                                                      Indicates whether the list of approved patches includes non-security updates that should be - * applied to the instances. The default value is false. Applies to Linux instances - * only.

                                                                      + * applied to the managed nodes. The default value is false. Applies to Linux managed + * nodes only.

                                                                      */ ApprovedPatchesEnableNonSecurity?: boolean; @@ -891,8 +902,8 @@ export interface UpdatePatchBaselineRequest { Description?: string; /** - *

                                                                      Information about the patches to use to update the instances, including target operating - * systems and source repositories. Applies to Linux instances only.

                                                                      + *

                                                                      Information about the patches to use to update the managed nodes, including target operating + * systems and source repositories. Applies to Linux managed nodes only.

                                                                      */ Sources?: PatchSource[]; @@ -953,8 +964,8 @@ export interface UpdatePatchBaselineResult { /** *

Indicates whether the list of approved patches includes non-security updates that should be
- * applied to the instances. The default value is false. Applies to Linux instances
- * only.

+ * applied to the managed nodes. The default value is false. Applies to Linux managed
+ * nodes only.

                                                                      */ ApprovedPatchesEnableNonSecurity?: boolean; @@ -986,8 +997,8 @@ export interface UpdatePatchBaselineResult { Description?: string; /** - *

Information about the patches to use to update the instances, including target operating
- * systems and source repositories. Applies to Linux instances only.

                                                                      + *

Information about the patches to use to update the managed nodes, including target operating
+ * systems and source repositories. Applies to Linux managed nodes only.

                                                                      */ Sources?: PatchSource[]; } @@ -1259,7 +1270,7 @@ export interface GetInventoryRequest { /** *

                                                                      Returns counts of inventory types based on one or more expressions. For example, if you * aggregate by using an expression that uses the AWS:InstanceInformation.PlatformType - * type, you can see a count of how many Windows and Linux instances exist in your inventoried + * type, you can see a count of how many Windows and Linux managed nodes exist in your inventoried * fleet.
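A minimal sketch of that aggregation with the v3 SSM client; the expression string is the one named above, everything else is boilerplate:

```ts
import { SSMClient, GetInventoryCommand } from "@aws-sdk/client-ssm";

const ssm = new SSMClient({ region: "us-east-1" });

async function countNodesByPlatform() {
  const { Entities } = await ssm.send(
    new GetInventoryCommand({
      // Aggregate the inventoried fleet by platform type (Windows vs. Linux).
      Aggregators: [{ Expression: "AWS:InstanceInformation.PlatformType" }],
    })
  );
  console.log(JSON.stringify(Entities, null, 2));
}

countNodesByPlatform().catch(console.error);
```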

                                                                      */ Aggregators?: InventoryAggregator[]; diff --git a/clients/client-ssm/src/protocols/Aws_json1_1.ts b/clients/client-ssm/src/protocols/Aws_json1_1.ts index aa8e14de522a..fba1c58a63de 100644 --- a/clients/client-ssm/src/protocols/Aws_json1_1.ts +++ b/clients/client-ssm/src/protocols/Aws_json1_1.ts @@ -942,7 +942,6 @@ import { UpdateDocumentDefaultVersionRequest, UpdateDocumentDefaultVersionResult, UpdateDocumentMetadataRequest, - UpdateDocumentMetadataResponse, UpdateDocumentRequest, UpdateDocumentResult, } from "../models/models_1"; @@ -953,6 +952,7 @@ import { OpsAggregator, OpsMetadataKeyLimitExceededException, ResourceDataSyncConflictException, + UpdateDocumentMetadataResponse, UpdateMaintenanceWindowRequest, UpdateMaintenanceWindowResult, UpdateMaintenanceWindowTargetRequest, @@ -21321,6 +21321,8 @@ const deserializeAws_json1_1InstanceInformation = (output: any, context: __Serde ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.RegistrationDate))) : undefined, ResourceType: __expectString(output.ResourceType), + SourceId: __expectString(output.SourceId), + SourceType: __expectString(output.SourceType), } as any; }; diff --git a/clients/client-storage-gateway/src/models/models_0.ts b/clients/client-storage-gateway/src/models/models_0.ts index b9ff11af3fc3..17234d5319d0 100644 --- a/clients/client-storage-gateway/src/models/models_0.ts +++ b/clients/client-storage-gateway/src/models/models_0.ts @@ -121,7 +121,7 @@ export interface ActivateGatewayInput { * value is CACHED.

                                                                      * *

                                                                      Valid Values: STORED | CACHED | VTL | - * FILE_S3 | FILE_FSX_SMB| + * VTL_SNOW | FILE_S3 | FILE_FSX_SMB *

                                                                      */ GatewayType?: string; @@ -3564,7 +3564,7 @@ export namespace NetworkInterface { }); } -export type HostEnvironment = "EC2" | "HYPER-V" | "KVM" | "OTHER" | "VMWARE"; +export type HostEnvironment = "EC2" | "HYPER-V" | "KVM" | "OTHER" | "SNOWBALL" | "VMWARE"; /** *

                                                                      A JSON object containing the following fields:

                                                                      @@ -3652,7 +3652,7 @@ export interface DescribeGatewayInformationOutput { CloudWatchLogGroupARN?: string; /** - *

                                                                      The type of hypervisor environment used by the host.

                                                                      + *

                                                                      The type of hardware or software platform on which the gateway is running.

                                                                      */ HostEnvironment?: HostEnvironment | string; @@ -3685,6 +3685,13 @@ export interface DescribeGatewayInformationOutput { * hardware specifications.

                                                                      */ SupportedGatewayCapacities?: (GatewayCapacity | string)[]; + + /** + *

A unique identifier for the specific instance of the host platform running the gateway.
+ * This value is only available for certain host environments, and its format depends on the
+ * host environment type.
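A minimal sketch of reading the new fields with the v3 Storage Gateway client; the gateway ARN is a placeholder:

```ts
import {
  StorageGatewayClient,
  DescribeGatewayInformationCommand,
} from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-1" });

async function showHostPlatform() {
  const { HostEnvironment, HostEnvironmentId } = await client.send(
    new DescribeGatewayInformationCommand({
      GatewayARN: "arn:aws:storagegateway:us-east-1:123456789012:gateway/sgw-12A3456B", // placeholder
    })
  );
  // HostEnvironment may now be "SNOWBALL"; HostEnvironmentId is only set for some platforms.
  console.log(HostEnvironment, HostEnvironmentId);
}

showHostPlatform().catch(console.error);
```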

                                                                      + */ + HostEnvironmentId?: string; } export namespace DescribeGatewayInformationOutput { @@ -5913,6 +5920,18 @@ export interface GatewayInfo { *

                                                                      The Amazon Web Services Region where the Amazon EC2 instance is located.

                                                                      */ Ec2InstanceRegion?: string; + + /** + *

                                                                      The type of hardware or software platform on which the gateway is running.

                                                                      + */ + HostEnvironment?: HostEnvironment | string; + + /** + *

A unique identifier for the specific instance of the host platform running the gateway.
+ * This value is only available for certain host environments, and its format depends on the
+ * host environment type.
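The same two fields are now also returned per gateway by ListGateways; a minimal sketch, with no required input:

```ts
import { StorageGatewayClient, ListGatewaysCommand } from "@aws-sdk/client-storage-gateway";

const client = new StorageGatewayClient({ region: "us-east-1" });

async function listGatewayPlatforms() {
  const { Gateways } = await client.send(new ListGatewaysCommand({}));
  for (const gw of Gateways ?? []) {
    // HostEnvironment and HostEnvironmentId are new on GatewayInfo in this release.
    console.log(gw.GatewayId, gw.HostEnvironment, gw.HostEnvironmentId);
  }
}

listGatewayPlatforms().catch(console.error);
```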

                                                                      + */ + HostEnvironmentId?: string; } export namespace GatewayInfo { diff --git a/clients/client-storage-gateway/src/protocols/Aws_json1_1.ts b/clients/client-storage-gateway/src/protocols/Aws_json1_1.ts index 004d14f03249..d58b06cfcf76 100644 --- a/clients/client-storage-gateway/src/protocols/Aws_json1_1.ts +++ b/clients/client-storage-gateway/src/protocols/Aws_json1_1.ts @@ -9122,6 +9122,7 @@ const deserializeAws_json1_1DescribeGatewayInformationOutput = ( GatewayTimezone: __expectString(output.GatewayTimezone), GatewayType: __expectString(output.GatewayType), HostEnvironment: __expectString(output.HostEnvironment), + HostEnvironmentId: __expectString(output.HostEnvironmentId), LastSoftwareUpdate: __expectString(output.LastSoftwareUpdate), NextUpdateAvailabilityDate: __expectString(output.NextUpdateAvailabilityDate), SoftwareUpdatesEndDate: __expectString(output.SoftwareUpdatesEndDate), @@ -9533,6 +9534,8 @@ const deserializeAws_json1_1GatewayInfo = (output: any, context: __SerdeContext) GatewayName: __expectString(output.GatewayName), GatewayOperationalState: __expectString(output.GatewayOperationalState), GatewayType: __expectString(output.GatewayType), + HostEnvironment: __expectString(output.HostEnvironment), + HostEnvironmentId: __expectString(output.HostEnvironmentId), } as any; }; diff --git a/clients/client-wellarchitected/README.md b/clients/client-wellarchitected/README.md index 247f40557506..842a07ae8428 100644 --- a/clients/client-wellarchitected/README.md +++ b/clients/client-wellarchitected/README.md @@ -7,13 +7,13 @@ AWS SDK for JavaScript WellArchitected Client for Node.js, Browser and React Native. -AWS Well-Architected Tool +Well-Architected Tool -

This is the AWS Well-Architected Tool API Reference. The AWS Well-Architected Tool API provides programmatic access to the
-AWS Well-Architected Tool in the
-AWS Management Console. For information
-about the AWS Well-Architected Tool, see the
-AWS Well-Architected Tool User Guide.

                                                                      +

This is the Well-Architected Tool API Reference. The WA Tool API provides programmatic access to the
+Well-Architected Tool in the
+Amazon Web Services Management Console. For information
+about the Well-Architected Tool, see the
+Well-Architected Tool User Guide.

                                                                      ## Installing diff --git a/clients/client-wellarchitected/src/WellArchitected.ts b/clients/client-wellarchitected/src/WellArchitected.ts index 25e4c46bef17..d2b45dd4b831 100644 --- a/clients/client-wellarchitected/src/WellArchitected.ts +++ b/clients/client-wellarchitected/src/WellArchitected.ts @@ -5,6 +5,16 @@ import { AssociateLensesCommandInput, AssociateLensesCommandOutput, } from "./commands/AssociateLensesCommand"; +import { + CreateLensShareCommand, + CreateLensShareCommandInput, + CreateLensShareCommandOutput, +} from "./commands/CreateLensShareCommand"; +import { + CreateLensVersionCommand, + CreateLensVersionCommandInput, + CreateLensVersionCommandOutput, +} from "./commands/CreateLensVersionCommand"; import { CreateMilestoneCommand, CreateMilestoneCommandInput, @@ -20,6 +30,12 @@ import { CreateWorkloadShareCommandInput, CreateWorkloadShareCommandOutput, } from "./commands/CreateWorkloadShareCommand"; +import { DeleteLensCommand, DeleteLensCommandInput, DeleteLensCommandOutput } from "./commands/DeleteLensCommand"; +import { + DeleteLensShareCommand, + DeleteLensShareCommandInput, + DeleteLensShareCommandOutput, +} from "./commands/DeleteLensShareCommand"; import { DeleteWorkloadCommand, DeleteWorkloadCommandInput, @@ -35,7 +51,9 @@ import { DisassociateLensesCommandInput, DisassociateLensesCommandOutput, } from "./commands/DisassociateLensesCommand"; +import { ExportLensCommand, ExportLensCommandInput, ExportLensCommandOutput } from "./commands/ExportLensCommand"; import { GetAnswerCommand, GetAnswerCommandInput, GetAnswerCommandOutput } from "./commands/GetAnswerCommand"; +import { GetLensCommand, GetLensCommandInput, GetLensCommandOutput } from "./commands/GetLensCommand"; import { GetLensReviewCommand, GetLensReviewCommandInput, @@ -57,6 +75,7 @@ import { GetMilestoneCommandOutput, } from "./commands/GetMilestoneCommand"; import { GetWorkloadCommand, GetWorkloadCommandInput, GetWorkloadCommandOutput } from "./commands/GetWorkloadCommand"; +import { ImportLensCommand, ImportLensCommandInput, ImportLensCommandOutput } from "./commands/ImportLensCommand"; import { ListAnswersCommand, ListAnswersCommandInput, ListAnswersCommandOutput } from "./commands/ListAnswersCommand"; import { ListLensesCommand, ListLensesCommandInput, ListLensesCommandOutput } from "./commands/ListLensesCommand"; import { @@ -69,6 +88,11 @@ import { ListLensReviewsCommandInput, ListLensReviewsCommandOutput, } from "./commands/ListLensReviewsCommand"; +import { + ListLensSharesCommand, + ListLensSharesCommandInput, + ListLensSharesCommandOutput, +} from "./commands/ListLensSharesCommand"; import { ListMilestonesCommand, ListMilestonesCommandInput, @@ -138,17 +162,27 @@ import { import { WellArchitectedClient } from "./WellArchitectedClient"; /** - * AWS Well-Architected Tool + * Well-Architected Tool * - *

This is the AWS Well-Architected Tool API Reference. The AWS Well-Architected Tool API provides programmatic access to the
- * AWS Well-Architected Tool in the
- * AWS Management Console. For information
- * about the AWS Well-Architected Tool, see the
- * AWS Well-Architected Tool User Guide.

                                                                      + *

This is the Well-Architected Tool API Reference. The WA Tool API provides programmatic access to the
+ * Well-Architected Tool in the
+ * Amazon Web Services Management Console. For information
+ * about the Well-Architected Tool, see the
+ * Well-Architected Tool User Guide.

                                                                      */ export class WellArchitected extends WellArchitectedClient { /** *

                                                                      Associate a lens to a workload.

                                                                      + *

                                                                      Up to 10 lenses can be associated with a workload in a single API operation. A + * maximum of 20 lenses can be associated with a workload.
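A minimal sketch of associating several lenses in one call with the aggregated client; the workload ID and lens aliases are placeholders:

```ts
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function addLenses() {
  // Placeholder workload ID and aliases; at most 10 aliases per call.
  await wa.associateLenses({
    WorkloadId: "1234567890abcdef1234567890abcdef",
    LensAliases: ["serverless", "softwareasaservice"],
  });
}

addLenses().catch(console.error);
```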

                                                                      + * + *

                                                                      + * Disclaimer + *

                                                                      + *

                                                                      By accessing and/or applying custom lenses created by another Amazon Web Services user or account, + * you acknowledge that custom lenses created by other users and shared with you are + * Third Party Content as defined in the Amazon Web Services Customer Agreement.

                                                                      + *
                                                                      */ public associateLenses( args: AssociateLensesCommandInput, @@ -179,6 +213,87 @@ export class WellArchitected extends WellArchitectedClient { } } + /** + *

                                                                      Create a lens share.

                                                                      + *

                                                                      The owner of a lens can share it with other Amazon Web Services accounts and IAM users in the same Amazon Web Services Region. + * Shared access to a lens is not removed until the lens invitation is deleted.
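A minimal sketch of sharing a custom lens with another account; the input field names (LensAlias, SharedWith, ClientRequestToken) and the ShareId output are assumed here, and the values are placeholders:

```ts
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function shareLens() {
  const { ShareId } = await wa.createLensShare({
    LensAlias: "my-custom-lens",            // placeholder custom lens alias
    SharedWith: "123456789012",             // placeholder account to share with
    ClientRequestToken: "unique-token-001", // idempotency token
  });
  console.log(ShareId);
}

shareLens().catch(console.error);
```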

                                                                      + * + *

                                                                      + * Disclaimer + *

                                                                      + *

                                                                      By sharing your custom lenses with other Amazon Web Services accounts, + * you acknowledge that Amazon Web Services will make your custom lenses available to those + * other accounts. Those other accounts may continue to access and use your + * shared custom lenses even if you delete the custom lenses + * from your own Amazon Web Services account or terminate + * your Amazon Web Services account.

                                                                      + *
                                                                      + */ + public createLensShare( + args: CreateLensShareCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createLensShare( + args: CreateLensShareCommandInput, + cb: (err: any, data?: CreateLensShareCommandOutput) => void + ): void; + public createLensShare( + args: CreateLensShareCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateLensShareCommandOutput) => void + ): void; + public createLensShare( + args: CreateLensShareCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateLensShareCommandOutput) => void), + cb?: (err: any, data?: CreateLensShareCommandOutput) => void + ): Promise | void { + const command = new CreateLensShareCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                                                      Create a new lens version.

                                                                      + *

                                                                      A lens can have up to 100 versions.

                                                                      + *

                                                                      After a lens has been imported, create a new lens version to publish it. The owner of a lens can share the lens with other + * Amazon Web Services accounts and IAM users in the same Amazon Web Services Region. Only the owner of a lens can delete it. + *
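A minimal sketch of publishing an imported lens as a new version; the input fields (LensAlias, LensVersion, IsMajorVersion, ClientRequestToken) are assumed, and the values are placeholders:

```ts
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function publishLens() {
  // Publish a previously imported custom lens; values are placeholders.
  const { LensArn, LensVersion } = await wa.createLensVersion({
    LensAlias: "my-custom-lens",
    LensVersion: "1.0",
    IsMajorVersion: true,
    ClientRequestToken: "unique-token-002",
  });
  console.log(LensArn, LensVersion);
}

publishLens().catch(console.error);
```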

                                                                      + */ + public createLensVersion( + args: CreateLensVersionCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createLensVersion( + args: CreateLensVersionCommandInput, + cb: (err: any, data?: CreateLensVersionCommandOutput) => void + ): void; + public createLensVersion( + args: CreateLensVersionCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateLensVersionCommandOutput) => void + ): void; + public createLensVersion( + args: CreateLensVersionCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateLensVersionCommandOutput) => void), + cb?: (err: any, data?: CreateLensVersionCommandOutput) => void + ): Promise | void { + const command = new CreateLensVersionCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                                                      Create a milestone for an existing workload.

                                                                      */ @@ -213,10 +328,10 @@ export class WellArchitected extends WellArchitectedClient { /** *

                                                                      Create a new workload.

                                                                      - *

                                                                      The owner of a workload can share the workload with other AWS accounts and IAM users - * in the same AWS Region. Only the owner of a workload can delete it.

                                                                      + *

                                                                      The owner of a workload can share the workload with other Amazon Web Services accounts and IAM users + * in the same Amazon Web Services Region. Only the owner of a workload can delete it.

                                                                      *

                                                                      For more information, see Defining a Workload in the - * AWS Well-Architected Tool User Guide.

                                                                      + * Well-Architected Tool User Guide.

                                                                      */ public createWorkload( args: CreateWorkloadCommandInput, @@ -249,11 +364,11 @@ export class WellArchitected extends WellArchitectedClient { /** *

                                                                      Create a workload share.

                                                                      - *

                                                                      The owner of a workload can share it with other AWS accounts and IAM users in the same - * AWS Region. Shared access to a workload is not removed until the workload invitation is + *

                                                                      The owner of a workload can share it with other Amazon Web Services accounts and IAM users in the same + * Amazon Web Services Region. Shared access to a workload is not removed until the workload invitation is * deleted.
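A minimal sketch of creating a workload share; the input fields (WorkloadId, SharedWith, PermissionType, ClientRequestToken) are assumed, and the values are placeholders:

```ts
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function shareWorkload() {
  // Placeholder IDs; PermissionType is READONLY or CONTRIBUTOR.
  await wa.createWorkloadShare({
    WorkloadId: "1234567890abcdef1234567890abcdef",
    SharedWith: "123456789012",
    PermissionType: "READONLY",
    ClientRequestToken: "unique-token-003",
  });
}

shareWorkload().catch(console.error);
```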

                                                                      *

                                                                      For more information, see Sharing a Workload in the - * AWS Well-Architected Tool User Guide.

                                                                      + * Well-Architected Tool User Guide.

                                                                      */ public createWorkloadShare( args: CreateWorkloadShareCommandInput, @@ -284,6 +399,91 @@ export class WellArchitected extends WellArchitectedClient { } } + /** + *

                                                                      Delete an existing lens.

                                                                      + *

                                                                      Only the owner of a lens can delete it. After the lens is deleted, Amazon Web Services accounts and IAM users + * that you shared the lens with can continue to use it, but they will no longer be able to apply it to new workloads. + *
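A minimal sketch of deleting a custom lens; the LensStatus field and its ALL value are assumed, and the other values are placeholders:

```ts
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function removeLens() {
  // Deletes the custom lens; values below are placeholders.
  await wa.deleteLens({
    LensAlias: "my-custom-lens",
    LensStatus: "ALL",                      // assumed: which lens versions to delete
    ClientRequestToken: "unique-token-004",
  });
}

removeLens().catch(console.error);
```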

                                                                      + * + *

                                                                      + * Disclaimer + *

                                                                      + *

                                                                      By sharing your custom lenses with other Amazon Web Services accounts, + * you acknowledge that Amazon Web Services will make your custom lenses available to those + * other accounts. Those other accounts may continue to access and use your + * shared custom lenses even if you delete the custom lenses + * from your own Amazon Web Services account or terminate + * your Amazon Web Services account.

                                                                      + *
                                                                      + */ + public deleteLens(args: DeleteLensCommandInput, options?: __HttpHandlerOptions): Promise; + public deleteLens(args: DeleteLensCommandInput, cb: (err: any, data?: DeleteLensCommandOutput) => void): void; + public deleteLens( + args: DeleteLensCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteLensCommandOutput) => void + ): void; + public deleteLens( + args: DeleteLensCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteLensCommandOutput) => void), + cb?: (err: any, data?: DeleteLensCommandOutput) => void + ): Promise | void { + const command = new DeleteLensCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

                                                                      Delete a lens share.

                                                                      + *

                                                                      After the lens share is deleted, Amazon Web Services accounts and IAM users + * that you shared the lens with can continue to use it, but they will no longer be able to apply it to new workloads.

                                                                      + * + *

                                                                      + * Disclaimer + *

                                                                      + *

                                                                      By sharing your custom lenses with other Amazon Web Services accounts, + * you acknowledge that Amazon Web Services will make your custom lenses available to those + * other accounts. Those other accounts may continue to access and use your + * shared custom lenses even if you delete the custom lenses + * from your own Amazon Web Services account or terminate + * your Amazon Web Services account.

                                                                      + *
                                                                      + */ + public deleteLensShare( + args: DeleteLensShareCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteLensShare( + args: DeleteLensShareCommandInput, + cb: (err: any, data?: DeleteLensShareCommandOutput) => void + ): void; + public deleteLensShare( + args: DeleteLensShareCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteLensShareCommandOutput) => void + ): void; + public deleteLensShare( + args: DeleteLensShareCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteLensShareCommandOutput) => void), + cb?: (err: any, data?: DeleteLensShareCommandOutput) => void + ): Promise | void { + const command = new DeleteLensShareCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                                                      Delete an existing workload.

                                                                      */ @@ -350,8 +550,9 @@ export class WellArchitected extends WellArchitectedClient { /** *

                                                                      Disassociate a lens from a workload.

                                                                      + *

                                                                      Up to 10 lenses can be disassociated from a workload in a single API operation.
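A minimal sketch of disassociating a lens; the workload ID and lens alias are placeholders:

```ts
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function removeLenses() {
  // Placeholders; the "wellarchitected" framework lens cannot be removed.
  await wa.disassociateLenses({
    WorkloadId: "1234567890abcdef1234567890abcdef",
    LensAliases: ["serverless"],
  });
}

removeLenses().catch(console.error);
```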

                                                                      * - *

                                                                      The AWS Well-Architected Framework lens (wellarchitected) cannot be + *

                                                                      The Amazon Web Services Well-Architected Framework lens (wellarchitected) cannot be * removed from a workload.

                                                                      *
                                                                      */ @@ -384,6 +585,46 @@ export class WellArchitected extends WellArchitectedClient { } } + /** + *

                                                                      Export an existing lens.

                                                                      + *

                                                                      Lenses are defined in JSON. For more information, see JSON format specification + * in the Well-Architected Tool User Guide. Only the owner of a lens can export it. + *
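A minimal sketch of exporting a lens to a local JSON file; the LensJSON output field is assumed, and the alias and file name are placeholders:

```ts
import { writeFile } from "fs/promises";
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function exportMyLens() {
  // The exported lens document is returned as a JSON string.
  const { LensJSON } = await wa.exportLens({ LensAlias: "my-custom-lens" });
  await writeFile("my-custom-lens.json", LensJSON ?? "");
}

exportMyLens().catch(console.error);
```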

                                                                      + * + *

                                                                      + * Disclaimer + *

                                                                      + *

                                                                      Do not include or gather personal identifiable information (PII) of end users or + * other identifiable individuals in or via your custom lenses. If your custom + * lens or those shared with you and used in your account do include or collect + * PII you are responsible for: ensuring that the included PII is processed in accordance + * with applicable law, providing adequate privacy notices, and obtaining necessary + * consents for processing such data.

                                                                      + *
                                                                      + */ + public exportLens(args: ExportLensCommandInput, options?: __HttpHandlerOptions): Promise; + public exportLens(args: ExportLensCommandInput, cb: (err: any, data?: ExportLensCommandOutput) => void): void; + public exportLens( + args: ExportLensCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ExportLensCommandOutput) => void + ): void; + public exportLens( + args: ExportLensCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ExportLensCommandOutput) => void), + cb?: (err: any, data?: ExportLensCommandOutput) => void + ): Promise | void { + const command = new ExportLensCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                                                      Get the answer to a specific question in a workload review.

                                                                      */ @@ -410,6 +651,32 @@ export class WellArchitected extends WellArchitectedClient { } } + /** + *

                                                                      Get an existing lens.

                                                                      + */ + public getLens(args: GetLensCommandInput, options?: __HttpHandlerOptions): Promise; + public getLens(args: GetLensCommandInput, cb: (err: any, data?: GetLensCommandOutput) => void): void; + public getLens( + args: GetLensCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetLensCommandOutput) => void + ): void; + public getLens( + args: GetLensCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetLensCommandOutput) => void), + cb?: (err: any, data?: GetLensCommandOutput) => void + ): Promise | void { + const command = new GetLensCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                                                      Get lens review.

                                                                      */ @@ -561,6 +828,50 @@ export class WellArchitected extends WellArchitectedClient { } } + /** + *

                                                                      Import a new lens.

                                                                      + *

                                                                      The lens cannot be applied to workloads or shared with other Amazon Web Services accounts + * until it's published with CreateLensVersion + *

                                                                      + *

                                                                      Lenses are defined in JSON. For more information, see JSON format specification + * in the Well-Architected Tool User Guide.

                                                                      + *

                                                                      A custom lens cannot exceed 500 KB in size.
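A minimal sketch of importing a lens from a local JSON file; the JSONString/ClientRequestToken input fields and the LensArn/Status output fields are assumed, and the file name is a placeholder:

```ts
import { readFile } from "fs/promises";
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function importCustomLens() {
  // The JSON document must follow the lens format specification.
  const JSONString = await readFile("my-custom-lens.json", "utf8");
  const { LensArn, Status } = await wa.importLens({
    JSONString,
    ClientRequestToken: "unique-token-005",
  });
  // The lens stays in draft until it is published with createLensVersion.
  console.log(LensArn, Status);
}

importCustomLens().catch(console.error);
```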

                                                                      + * + * + *

                                                                      + * Disclaimer + *

                                                                      + *

                                                                      Do not include or gather personal identifiable information (PII) of end users or + * other identifiable individuals in or via your custom lenses. If your custom + * lens or those shared with you and used in your account do include or collect + * PII you are responsible for: ensuring that the included PII is processed in accordance + * with applicable law, providing adequate privacy notices, and obtaining necessary + * consents for processing such data.

                                                                      + *
                                                                      + */ + public importLens(args: ImportLensCommandInput, options?: __HttpHandlerOptions): Promise; + public importLens(args: ImportLensCommandInput, cb: (err: any, data?: ImportLensCommandOutput) => void): void; + public importLens( + args: ImportLensCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ImportLensCommandOutput) => void + ): void; + public importLens( + args: ImportLensCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ImportLensCommandOutput) => void), + cb?: (err: any, data?: ImportLensCommandOutput) => void + ): Promise | void { + const command = new ImportLensCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                                                      List of answers.

                                                                      */ @@ -677,6 +988,38 @@ export class WellArchitected extends WellArchitectedClient { } } + /** + *

                                                                      List the lens shares associated with the lens.
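A minimal sketch of listing the shares for a custom lens; the LensShareSummaries output shape is assumed, and the alias is a placeholder:

```ts
import { WellArchitected } from "@aws-sdk/client-wellarchitected";

const wa = new WellArchitected({ region: "us-east-1" });

async function showLensShares() {
  const { LensShareSummaries } = await wa.listLensShares({ LensAlias: "my-custom-lens" });
  for (const share of LensShareSummaries ?? []) {
    console.log(share.ShareId, share.SharedWith, share.Status);
  }
}

showLensShares().catch(console.error);
```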

                                                                      + */ + public listLensShares( + args: ListLensSharesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listLensShares( + args: ListLensSharesCommandInput, + cb: (err: any, data?: ListLensSharesCommandOutput) => void + ): void; + public listLensShares( + args: ListLensSharesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListLensSharesCommandOutput) => void + ): void; + public listLensShares( + args: ListLensSharesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListLensSharesCommandOutput) => void), + cb?: (err: any, data?: ListLensSharesCommandOutput) => void + ): Promise | void { + const command = new ListLensSharesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + /** *

                                                                      List all milestones for an existing workload.

                                                                      */ diff --git a/clients/client-wellarchitected/src/WellArchitectedClient.ts b/clients/client-wellarchitected/src/WellArchitectedClient.ts index 1e6630e6aa11..d7ccd91eb281 100644 --- a/clients/client-wellarchitected/src/WellArchitectedClient.ts +++ b/clients/client-wellarchitected/src/WellArchitectedClient.ts @@ -50,19 +50,25 @@ import { } from "@aws-sdk/types"; import { AssociateLensesCommandInput, AssociateLensesCommandOutput } from "./commands/AssociateLensesCommand"; +import { CreateLensShareCommandInput, CreateLensShareCommandOutput } from "./commands/CreateLensShareCommand"; +import { CreateLensVersionCommandInput, CreateLensVersionCommandOutput } from "./commands/CreateLensVersionCommand"; import { CreateMilestoneCommandInput, CreateMilestoneCommandOutput } from "./commands/CreateMilestoneCommand"; import { CreateWorkloadCommandInput, CreateWorkloadCommandOutput } from "./commands/CreateWorkloadCommand"; import { CreateWorkloadShareCommandInput, CreateWorkloadShareCommandOutput, } from "./commands/CreateWorkloadShareCommand"; +import { DeleteLensCommandInput, DeleteLensCommandOutput } from "./commands/DeleteLensCommand"; +import { DeleteLensShareCommandInput, DeleteLensShareCommandOutput } from "./commands/DeleteLensShareCommand"; import { DeleteWorkloadCommandInput, DeleteWorkloadCommandOutput } from "./commands/DeleteWorkloadCommand"; import { DeleteWorkloadShareCommandInput, DeleteWorkloadShareCommandOutput, } from "./commands/DeleteWorkloadShareCommand"; import { DisassociateLensesCommandInput, DisassociateLensesCommandOutput } from "./commands/DisassociateLensesCommand"; +import { ExportLensCommandInput, ExportLensCommandOutput } from "./commands/ExportLensCommand"; import { GetAnswerCommandInput, GetAnswerCommandOutput } from "./commands/GetAnswerCommand"; +import { GetLensCommandInput, GetLensCommandOutput } from "./commands/GetLensCommand"; import { GetLensReviewCommandInput, GetLensReviewCommandOutput } from "./commands/GetLensReviewCommand"; import { GetLensReviewReportCommandInput, @@ -74,6 +80,7 @@ import { } from "./commands/GetLensVersionDifferenceCommand"; import { GetMilestoneCommandInput, GetMilestoneCommandOutput } from "./commands/GetMilestoneCommand"; import { GetWorkloadCommandInput, GetWorkloadCommandOutput } from "./commands/GetWorkloadCommand"; +import { ImportLensCommandInput, ImportLensCommandOutput } from "./commands/ImportLensCommand"; import { ListAnswersCommandInput, ListAnswersCommandOutput } from "./commands/ListAnswersCommand"; import { ListLensesCommandInput, ListLensesCommandOutput } from "./commands/ListLensesCommand"; import { @@ -81,6 +88,7 @@ import { ListLensReviewImprovementsCommandOutput, } from "./commands/ListLensReviewImprovementsCommand"; import { ListLensReviewsCommandInput, ListLensReviewsCommandOutput } from "./commands/ListLensReviewsCommand"; +import { ListLensSharesCommandInput, ListLensSharesCommandOutput } from "./commands/ListLensSharesCommand"; import { ListMilestonesCommandInput, ListMilestonesCommandOutput } from "./commands/ListMilestonesCommand"; import { ListNotificationsCommandInput, ListNotificationsCommandOutput } from "./commands/ListNotificationsCommand"; import { @@ -111,21 +119,29 @@ import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; export type ServiceInputTypes = | AssociateLensesCommandInput + | CreateLensShareCommandInput + | CreateLensVersionCommandInput | CreateMilestoneCommandInput | CreateWorkloadCommandInput | 
CreateWorkloadShareCommandInput + | DeleteLensCommandInput + | DeleteLensShareCommandInput | DeleteWorkloadCommandInput | DeleteWorkloadShareCommandInput | DisassociateLensesCommandInput + | ExportLensCommandInput | GetAnswerCommandInput + | GetLensCommandInput | GetLensReviewCommandInput | GetLensReviewReportCommandInput | GetLensVersionDifferenceCommandInput | GetMilestoneCommandInput | GetWorkloadCommandInput + | ImportLensCommandInput | ListAnswersCommandInput | ListLensReviewImprovementsCommandInput | ListLensReviewsCommandInput + | ListLensSharesCommandInput | ListLensesCommandInput | ListMilestonesCommandInput | ListNotificationsCommandInput @@ -144,21 +160,29 @@ export type ServiceInputTypes = export type ServiceOutputTypes = | AssociateLensesCommandOutput + | CreateLensShareCommandOutput + | CreateLensVersionCommandOutput | CreateMilestoneCommandOutput | CreateWorkloadCommandOutput | CreateWorkloadShareCommandOutput + | DeleteLensCommandOutput + | DeleteLensShareCommandOutput | DeleteWorkloadCommandOutput | DeleteWorkloadShareCommandOutput | DisassociateLensesCommandOutput + | ExportLensCommandOutput | GetAnswerCommandOutput + | GetLensCommandOutput | GetLensReviewCommandOutput | GetLensReviewReportCommandOutput | GetLensVersionDifferenceCommandOutput | GetMilestoneCommandOutput | GetWorkloadCommandOutput + | ImportLensCommandOutput | ListAnswersCommandOutput | ListLensReviewImprovementsCommandOutput | ListLensReviewsCommandOutput + | ListLensSharesCommandOutput | ListLensesCommandOutput | ListMilestonesCommandOutput | ListNotificationsCommandOutput @@ -324,13 +348,13 @@ type WellArchitectedClientResolvedConfigType = __SmithyResolvedConfiguration<__H export interface WellArchitectedClientResolvedConfig extends WellArchitectedClientResolvedConfigType {} /** - * AWS Well-Architected Tool + * Well-Architected Tool * - *

This is the AWS Well-Architected Tool API Reference. The AWS Well-Architected Tool API provides programmatic access to the
- * AWS Well-Architected Tool in the
- * AWS Management Console. For information
- * about the AWS Well-Architected Tool, see the
- * AWS Well-Architected Tool User Guide.

                                                                      + *

This is the Well-Architected Tool API Reference. The WA Tool API provides programmatic access to the
+ * Well-Architected Tool in the
+ * Amazon Web Services Management Console. For information
+ * about the Well-Architected Tool, see the
+ * Well-Architected Tool User Guide.

                                                                      */ export class WellArchitectedClient extends __Client< __HttpHandlerOptions, diff --git a/clients/client-wellarchitected/src/commands/AssociateLensesCommand.ts b/clients/client-wellarchitected/src/commands/AssociateLensesCommand.ts index f26d09b7f10d..e47edef45088 100644 --- a/clients/client-wellarchitected/src/commands/AssociateLensesCommand.ts +++ b/clients/client-wellarchitected/src/commands/AssociateLensesCommand.ts @@ -23,6 +23,16 @@ export interface AssociateLensesCommandOutput extends __MetadataBearer {} /** *

                                                                      Associate a lens to a workload.

                                                                      + *

                                                                      Up to 10 lenses can be associated with a workload in a single API operation. A + * maximum of 20 lenses can be associated with a workload.

                                                                      + * + *

                                                                      + * Disclaimer + *

                                                                      + *

                                                                      By accessing and/or applying custom lenses created by another Amazon Web Services user or account, + * you acknowledge that custom lenses created by other users and shared with you are + * Third Party Content as defined in the Amazon Web Services Customer Agreement.

                                                                      + *
                                                                      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-wellarchitected/src/commands/CreateLensShareCommand.ts b/clients/client-wellarchitected/src/commands/CreateLensShareCommand.ts new file mode 100644 index 000000000000..ac77db7f0def --- /dev/null +++ b/clients/client-wellarchitected/src/commands/CreateLensShareCommand.ts @@ -0,0 +1,108 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateLensShareInput, CreateLensShareOutput } from "../models/models_0"; +import { + deserializeAws_restJson1CreateLensShareCommand, + serializeAws_restJson1CreateLensShareCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WellArchitectedClientResolvedConfig } from "../WellArchitectedClient"; + +export interface CreateLensShareCommandInput extends CreateLensShareInput {} +export interface CreateLensShareCommandOutput extends CreateLensShareOutput, __MetadataBearer {} + +/** + *

                                                                      Create a lens share.

                                                                      + *

                                                                      The owner of a lens can share it with other Amazon Web Services accounts and IAM users in the same Amazon Web Services Region. + * Shared access to a lens is not removed until the lens invitation is deleted.

                                                                      + * + *

                                                                      + * Disclaimer + *

                                                                      + *

                                                                      By sharing your custom lenses with other Amazon Web Services accounts, + * you acknowledge that Amazon Web Services will make your custom lenses available to those + * other accounts. Those other accounts may continue to access and use your + * shared custom lenses even if you delete the custom lenses + * from your own Amazon Web Services account or terminate + * your Amazon Web Services account.

                                                                      + *
                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WellArchitectedClient, CreateLensShareCommand } from "@aws-sdk/client-wellarchitected"; // ES Modules import + * // const { WellArchitectedClient, CreateLensShareCommand } = require("@aws-sdk/client-wellarchitected"); // CommonJS import + * const client = new WellArchitectedClient(config); + * const command = new CreateLensShareCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateLensShareCommandInput} for command's `input` shape. + * @see {@link CreateLensShareCommandOutput} for command's `response` shape. + * @see {@link WellArchitectedClientResolvedConfig | config} for WellArchitectedClient's `config` shape. + * + */ +export class CreateLensShareCommand extends $Command< + CreateLensShareCommandInput, + CreateLensShareCommandOutput, + WellArchitectedClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateLensShareCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WellArchitectedClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WellArchitectedClient"; + const commandName = "CreateLensShareCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateLensShareInput.filterSensitiveLog, + outputFilterSensitiveLog: CreateLensShareOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateLensShareCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateLensShareCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateLensShareCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-wellarchitected/src/commands/CreateLensVersionCommand.ts b/clients/client-wellarchitected/src/commands/CreateLensVersionCommand.ts new file mode 100644 index 000000000000..c51512a2953c --- /dev/null +++ b/clients/client-wellarchitected/src/commands/CreateLensVersionCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateLensVersionInput, CreateLensVersionOutput } from "../models/models_0"; +import { + deserializeAws_restJson1CreateLensVersionCommand, + 
serializeAws_restJson1CreateLensVersionCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WellArchitectedClientResolvedConfig } from "../WellArchitectedClient"; + +export interface CreateLensVersionCommandInput extends CreateLensVersionInput {} +export interface CreateLensVersionCommandOutput extends CreateLensVersionOutput, __MetadataBearer {} + +/** + *

                                                                      Create a new lens version.

                                                                      + *

                                                                      A lens can have up to 100 versions.

                                                                      + *

                                                                      After a lens has been imported, create a new lens version to publish it. The owner of a lens can share the lens with other + * Amazon Web Services accounts and IAM users in the same Amazon Web Services Region. Only the owner of a lens can delete it. + *

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WellArchitectedClient, CreateLensVersionCommand } from "@aws-sdk/client-wellarchitected"; // ES Modules import + * // const { WellArchitectedClient, CreateLensVersionCommand } = require("@aws-sdk/client-wellarchitected"); // CommonJS import + * const client = new WellArchitectedClient(config); + * const command = new CreateLensVersionCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateLensVersionCommandInput} for command's `input` shape. + * @see {@link CreateLensVersionCommandOutput} for command's `response` shape. + * @see {@link WellArchitectedClientResolvedConfig | config} for WellArchitectedClient's `config` shape. + * + */ +export class CreateLensVersionCommand extends $Command< + CreateLensVersionCommandInput, + CreateLensVersionCommandOutput, + WellArchitectedClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateLensVersionCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WellArchitectedClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WellArchitectedClient"; + const commandName = "CreateLensVersionCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateLensVersionInput.filterSensitiveLog, + outputFilterSensitiveLog: CreateLensVersionOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateLensVersionCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateLensVersionCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateLensVersionCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-wellarchitected/src/commands/CreateWorkloadCommand.ts b/clients/client-wellarchitected/src/commands/CreateWorkloadCommand.ts index f77fe0b20659..143c6f3e26bf 100644 --- a/clients/client-wellarchitected/src/commands/CreateWorkloadCommand.ts +++ b/clients/client-wellarchitected/src/commands/CreateWorkloadCommand.ts @@ -23,10 +23,10 @@ export interface CreateWorkloadCommandOutput extends CreateWorkloadOutput, __Met /** *

                                                                      Create a new workload.

                                                                      - *

                                                                      The owner of a workload can share the workload with other AWS accounts and IAM users - * in the same AWS Region. Only the owner of a workload can delete it.

                                                                      + *

                                                                      The owner of a workload can share the workload with other Amazon Web Services accounts and IAM users + * in the same Amazon Web Services Region. Only the owner of a workload can delete it.

                                                                      *

                                                                      For more information, see Defining a Workload in the - * AWS Well-Architected Tool User Guide.

                                                                      + * Well-Architected Tool User Guide.

                                                                      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-wellarchitected/src/commands/CreateWorkloadShareCommand.ts b/clients/client-wellarchitected/src/commands/CreateWorkloadShareCommand.ts index a52671afeb01..4a3dc7332ce2 100644 --- a/clients/client-wellarchitected/src/commands/CreateWorkloadShareCommand.ts +++ b/clients/client-wellarchitected/src/commands/CreateWorkloadShareCommand.ts @@ -23,11 +23,11 @@ export interface CreateWorkloadShareCommandOutput extends CreateWorkloadShareOut /** *

                                                                      Create a workload share.

                                                                      - *

                                                                      The owner of a workload can share it with other AWS accounts and IAM users in the same - * AWS Region. Shared access to a workload is not removed until the workload invitation is + *

                                                                      The owner of a workload can share it with other Amazon Web Services accounts and IAM users in the same + * Amazon Web Services Region. Shared access to a workload is not removed until the workload invitation is * deleted.

                                                                      *

                                                                      For more information, see Sharing a Workload in the - * AWS Well-Architected Tool User Guide.

                                                                      + * Well-Architected Tool User Guide.

                                                                      * @example * Use a bare-bones client and the command you need to make an API call. * ```javascript diff --git a/clients/client-wellarchitected/src/commands/DeleteLensCommand.ts b/clients/client-wellarchitected/src/commands/DeleteLensCommand.ts new file mode 100644 index 000000000000..ff8e517da00d --- /dev/null +++ b/clients/client-wellarchitected/src/commands/DeleteLensCommand.ts @@ -0,0 +1,109 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteLensInput } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteLensCommand, + serializeAws_restJson1DeleteLensCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WellArchitectedClientResolvedConfig } from "../WellArchitectedClient"; + +export interface DeleteLensCommandInput extends DeleteLensInput {} +export interface DeleteLensCommandOutput extends __MetadataBearer {} + +/** + *

Delete an existing lens.
+ *
Only the owner of a lens can delete it. After the lens is deleted, Amazon Web Services accounts and IAM users
+ * that you shared the lens with can continue to use it, but they will no longer be able to apply it to new workloads.
+ *
+ *
+ *
+ * Disclaimer
+ *
+ *
By sharing your custom lenses with other Amazon Web Services accounts,
+ * you acknowledge that Amazon Web Services will make your custom lenses available to those
+ * other accounts. Those other accounts may continue to access and use your
+ * shared custom lenses even if you delete the custom lenses
+ * from your own Amazon Web Services account or terminate
+ * your Amazon Web Services account.
+ *
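A minimal sketch of the new DeleteLens call follows. LensAlias, LensStatus, and the optional ClientRequestToken all come from the DeleteLensInput shape added later in this patch, and LensStatusType is the ALL | DRAFT | PUBLISHED enum also added there; the Region is a placeholder.

```ts
// Sketch only: delete a custom lens in every state (LensStatus "ALL").
import { WellArchitectedClient, DeleteLensCommand } from "@aws-sdk/client-wellarchitected";

async function deleteCustomLens(lensAlias: string): Promise<void> {
  const client = new WellArchitectedClient({ region: "us-east-1" }); // placeholder Region
  await client.send(
    new DeleteLensCommand({
      LensAlias: lensAlias, // each lens is identified by its LensSummary$LensAlias
      LensStatus: "ALL",    // LensStatusType: ALL | DRAFT | PUBLISHED
      // ClientRequestToken omitted: the SDK generates one automatically.
    })
  );
}
```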
                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WellArchitectedClient, DeleteLensCommand } from "@aws-sdk/client-wellarchitected"; // ES Modules import + * // const { WellArchitectedClient, DeleteLensCommand } = require("@aws-sdk/client-wellarchitected"); // CommonJS import + * const client = new WellArchitectedClient(config); + * const command = new DeleteLensCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteLensCommandInput} for command's `input` shape. + * @see {@link DeleteLensCommandOutput} for command's `response` shape. + * @see {@link WellArchitectedClientResolvedConfig | config} for WellArchitectedClient's `config` shape. + * + */ +export class DeleteLensCommand extends $Command< + DeleteLensCommandInput, + DeleteLensCommandOutput, + WellArchitectedClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteLensCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WellArchitectedClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WellArchitectedClient"; + const commandName = "DeleteLensCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteLensInput.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteLensCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteLensCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteLensCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-wellarchitected/src/commands/DeleteLensShareCommand.ts b/clients/client-wellarchitected/src/commands/DeleteLensShareCommand.ts new file mode 100644 index 000000000000..9edb30de2e52 --- /dev/null +++ b/clients/client-wellarchitected/src/commands/DeleteLensShareCommand.ts @@ -0,0 +1,108 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteLensShareInput } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteLensShareCommand, + serializeAws_restJson1DeleteLensShareCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, 
ServiceOutputTypes, WellArchitectedClientResolvedConfig } from "../WellArchitectedClient"; + +export interface DeleteLensShareCommandInput extends DeleteLensShareInput {} +export interface DeleteLensShareCommandOutput extends __MetadataBearer {} + +/** + *

Delete a lens share.
+ *
After the lens share is deleted, Amazon Web Services accounts and IAM users
+ * that you shared the lens with can continue to use it, but they will no longer be able to apply it to new workloads.
+ *
+ *
+ * Disclaimer
+ *
+ *
By sharing your custom lenses with other Amazon Web Services accounts,
+ * you acknowledge that Amazon Web Services will make your custom lenses available to those
+ * other accounts. Those other accounts may continue to access and use your
+ * shared custom lenses even if you delete the custom lenses
+ * from your own Amazon Web Services account or terminate
+ * your Amazon Web Services account.
+ *
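A corresponding sketch for revoking a single lens share follows. ShareId and LensAlias are the DeleteLensShareInput members defined later in this patch; the Region is a placeholder.

```ts
// Sketch only: revoke one lens share by its ShareId.
import { WellArchitectedClient, DeleteLensShareCommand } from "@aws-sdk/client-wellarchitected";

async function revokeLensShare(shareId: string, lensAlias: string): Promise<void> {
  const client = new WellArchitectedClient({ region: "us-east-1" }); // placeholder Region
  await client.send(new DeleteLensShareCommand({ ShareId: shareId, LensAlias: lensAlias }));
}
```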
                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WellArchitectedClient, DeleteLensShareCommand } from "@aws-sdk/client-wellarchitected"; // ES Modules import + * // const { WellArchitectedClient, DeleteLensShareCommand } = require("@aws-sdk/client-wellarchitected"); // CommonJS import + * const client = new WellArchitectedClient(config); + * const command = new DeleteLensShareCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteLensShareCommandInput} for command's `input` shape. + * @see {@link DeleteLensShareCommandOutput} for command's `response` shape. + * @see {@link WellArchitectedClientResolvedConfig | config} for WellArchitectedClient's `config` shape. + * + */ +export class DeleteLensShareCommand extends $Command< + DeleteLensShareCommandInput, + DeleteLensShareCommandOutput, + WellArchitectedClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteLensShareCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WellArchitectedClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WellArchitectedClient"; + const commandName = "DeleteLensShareCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteLensShareInput.filterSensitiveLog, + outputFilterSensitiveLog: (output: any) => output, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteLensShareCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteLensShareCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteLensShareCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-wellarchitected/src/commands/DisassociateLensesCommand.ts b/clients/client-wellarchitected/src/commands/DisassociateLensesCommand.ts index 6904d85bf5cd..1da307cd9ee2 100644 --- a/clients/client-wellarchitected/src/commands/DisassociateLensesCommand.ts +++ b/clients/client-wellarchitected/src/commands/DisassociateLensesCommand.ts @@ -23,8 +23,9 @@ export interface DisassociateLensesCommandOutput extends __MetadataBearer {} /** *

Disassociate a lens from a workload.
+ *
Up to 10 lenses can be disassociated from a workload in a single API operation.
*
- *
The AWS Well-Architected Framework lens (wellarchitected) cannot be
+ *
The Amazon Web Services Well-Architected Framework lens (wellarchitected) cannot be
* removed from a workload.
*
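A minimal sketch of detaching lenses follows. The input is assumed to mirror AssociateLensesInput (WorkloadId plus a list of lens aliases), whose members are shown later in this patch; the Region is a placeholder.

```ts
// Sketch only: detach up to 10 lenses from a workload in one call.
import { WellArchitectedClient, DisassociateLensesCommand } from "@aws-sdk/client-wellarchitected";

async function detachLenses(workloadId: string, lensAliases: string[]): Promise<void> {
  const client = new WellArchitectedClient({ region: "us-east-1" }); // placeholder Region
  await client.send(
    new DisassociateLensesCommand({
      WorkloadId: workloadId,
      LensAliases: lensAliases, // the "wellarchitected" framework lens cannot be removed
    })
  );
}
```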
                                                                      * @example diff --git a/clients/client-wellarchitected/src/commands/ExportLensCommand.ts b/clients/client-wellarchitected/src/commands/ExportLensCommand.ts new file mode 100644 index 000000000000..897a032d577f --- /dev/null +++ b/clients/client-wellarchitected/src/commands/ExportLensCommand.ts @@ -0,0 +1,109 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ExportLensInput, ExportLensOutput } from "../models/models_0"; +import { + deserializeAws_restJson1ExportLensCommand, + serializeAws_restJson1ExportLensCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WellArchitectedClientResolvedConfig } from "../WellArchitectedClient"; + +export interface ExportLensCommandInput extends ExportLensInput {} +export interface ExportLensCommandOutput extends ExportLensOutput, __MetadataBearer {} + +/** + *

Export an existing lens.
+ *
Lenses are defined in JSON. For more information, see JSON format specification
+ * in the Well-Architected Tool User Guide. Only the owner of a lens can export it.
+ *
+ *
+ *
+ * Disclaimer
+ *
+ *
Do not include or gather personally identifiable information (PII) of end users or
+ * other identifiable individuals in or via your custom lenses. If your custom
+ * lens or those shared with you and used in your account do include or collect
+ * PII, you are responsible for: ensuring that the included PII is processed in accordance
+ * with applicable law, providing adequate privacy notices, and obtaining necessary
+ * consents for processing such data.
+ *
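A minimal sketch of exporting a lens follows. LensAlias follows the pattern used by the other lens operations in this patch; the ExportLensOutput members are not expanded in this hunk, so the whole response is logged rather than a specific field, and the Region is a placeholder.

```ts
// Sketch only: export a lens you own and print the response payload.
import { WellArchitectedClient, ExportLensCommand } from "@aws-sdk/client-wellarchitected";

async function exportLens(lensAlias: string): Promise<void> {
  const client = new WellArchitectedClient({ region: "us-east-1" }); // placeholder Region
  const response = await client.send(new ExportLensCommand({ LensAlias: lensAlias }));
  console.log(JSON.stringify(response, null, 2)); // inspect the exported lens JSON here
}
```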
                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WellArchitectedClient, ExportLensCommand } from "@aws-sdk/client-wellarchitected"; // ES Modules import + * // const { WellArchitectedClient, ExportLensCommand } = require("@aws-sdk/client-wellarchitected"); // CommonJS import + * const client = new WellArchitectedClient(config); + * const command = new ExportLensCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ExportLensCommandInput} for command's `input` shape. + * @see {@link ExportLensCommandOutput} for command's `response` shape. + * @see {@link WellArchitectedClientResolvedConfig | config} for WellArchitectedClient's `config` shape. + * + */ +export class ExportLensCommand extends $Command< + ExportLensCommandInput, + ExportLensCommandOutput, + WellArchitectedClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ExportLensCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WellArchitectedClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WellArchitectedClient"; + const commandName = "ExportLensCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ExportLensInput.filterSensitiveLog, + outputFilterSensitiveLog: ExportLensOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ExportLensCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ExportLensCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ExportLensCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-wellarchitected/src/commands/GetLensCommand.ts b/clients/client-wellarchitected/src/commands/GetLensCommand.ts new file mode 100644 index 000000000000..15e33a7e6fba --- /dev/null +++ b/clients/client-wellarchitected/src/commands/GetLensCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetLensInput, GetLensOutput } from "../models/models_0"; +import { + deserializeAws_restJson1GetLensCommand, + serializeAws_restJson1GetLensCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, 
WellArchitectedClientResolvedConfig } from "../WellArchitectedClient"; + +export interface GetLensCommandInput extends GetLensInput {} +export interface GetLensCommandOutput extends GetLensOutput, __MetadataBearer {} + +/** + *

                                                                      Get an existing lens.

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WellArchitectedClient, GetLensCommand } from "@aws-sdk/client-wellarchitected"; // ES Modules import + * // const { WellArchitectedClient, GetLensCommand } = require("@aws-sdk/client-wellarchitected"); // CommonJS import + * const client = new WellArchitectedClient(config); + * const command = new GetLensCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetLensCommandInput} for command's `input` shape. + * @see {@link GetLensCommandOutput} for command's `response` shape. + * @see {@link WellArchitectedClientResolvedConfig | config} for WellArchitectedClient's `config` shape. + * + */ +export class GetLensCommand extends $Command< + GetLensCommandInput, + GetLensCommandOutput, + WellArchitectedClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetLensCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WellArchitectedClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WellArchitectedClient"; + const commandName = "GetLensCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetLensInput.filterSensitiveLog, + outputFilterSensitiveLog: GetLensOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetLensCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetLensCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetLensCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-wellarchitected/src/commands/ImportLensCommand.ts b/clients/client-wellarchitected/src/commands/ImportLensCommand.ts new file mode 100644 index 000000000000..aefe528ba21c --- /dev/null +++ b/clients/client-wellarchitected/src/commands/ImportLensCommand.ts @@ -0,0 +1,113 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ImportLensInput, ImportLensOutput } from "../models/models_0"; +import { + deserializeAws_restJson1ImportLensCommand, + serializeAws_restJson1ImportLensCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WellArchitectedClientResolvedConfig 
} from "../WellArchitectedClient"; + +export interface ImportLensCommandInput extends ImportLensInput {} +export interface ImportLensCommandOutput extends ImportLensOutput, __MetadataBearer {} + +/** + *

Import a new lens.
+ *
The lens cannot be applied to workloads or shared with other Amazon Web Services accounts
+ * until it's published with CreateLensVersion.
+ *
+ *
Lenses are defined in JSON. For more information, see JSON format specification
+ * in the Well-Architected Tool User Guide.
+ *
A custom lens cannot exceed 500 KB in size.
+ *
+ *
+ * Disclaimer
+ *
+ *
Do not include or gather personally identifiable information (PII) of end users or
+ * other identifiable individuals in or via your custom lenses. If your custom
+ * lens or those shared with you and used in your account do include or collect
+ * PII, you are responsible for: ensuring that the included PII is processed in accordance
+ * with applicable law, providing adequate privacy notices, and obtaining necessary
+ * consents for processing such data.
+ *
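A sketch of the import-then-publish flow follows. The JSONString and LensArn member names are assumptions (ImportLensInput and ImportLensOutput are not expanded in this hunk); the CreateLensVersionInput members LensAlias, LensVersion, and IsMajorVersion are shown later in this patch, and the file path and alias are placeholders.

```ts
// Sketch only: import a custom lens from a JSON file, then publish it so it can be
// applied to workloads and shared. The lens JSON must stay under the 500 KB limit.
import { readFile } from "node:fs/promises";
import {
  WellArchitectedClient,
  ImportLensCommand,
  CreateLensVersionCommand,
} from "@aws-sdk/client-wellarchitected";

async function importAndPublishLens(path: string): Promise<void> {
  const client = new WellArchitectedClient({ region: "us-east-1" }); // placeholder Region
  const lensJson = await readFile(path, "utf8");

  const imported = await client.send(new ImportLensCommand({ JSONString: lensJson })); // assumed member
  console.log(imported.LensArn); // assumed output member

  // Publish the draft created by the import.
  await client.send(
    new CreateLensVersionCommand({
      LensAlias: "my-custom-lens", // placeholder alias of the imported lens
      LensVersion: "1.0",
      IsMajorVersion: true,
    })
  );
}
```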
                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WellArchitectedClient, ImportLensCommand } from "@aws-sdk/client-wellarchitected"; // ES Modules import + * // const { WellArchitectedClient, ImportLensCommand } = require("@aws-sdk/client-wellarchitected"); // CommonJS import + * const client = new WellArchitectedClient(config); + * const command = new ImportLensCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ImportLensCommandInput} for command's `input` shape. + * @see {@link ImportLensCommandOutput} for command's `response` shape. + * @see {@link WellArchitectedClientResolvedConfig | config} for WellArchitectedClient's `config` shape. + * + */ +export class ImportLensCommand extends $Command< + ImportLensCommandInput, + ImportLensCommandOutput, + WellArchitectedClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ImportLensCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WellArchitectedClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WellArchitectedClient"; + const commandName = "ImportLensCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ImportLensInput.filterSensitiveLog, + outputFilterSensitiveLog: ImportLensOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ImportLensCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ImportLensCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ImportLensCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-wellarchitected/src/commands/ListLensSharesCommand.ts b/clients/client-wellarchitected/src/commands/ListLensSharesCommand.ts new file mode 100644 index 000000000000..484cc4976482 --- /dev/null +++ b/clients/client-wellarchitected/src/commands/ListLensSharesCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListLensSharesInput, ListLensSharesOutput } from "../models/models_0"; +import { + deserializeAws_restJson1ListLensSharesCommand, + serializeAws_restJson1ListLensSharesCommand, +} from "../protocols/Aws_restJson1"; +import { 
ServiceInputTypes, ServiceOutputTypes, WellArchitectedClientResolvedConfig } from "../WellArchitectedClient"; + +export interface ListLensSharesCommandInput extends ListLensSharesInput {} +export interface ListLensSharesCommandOutput extends ListLensSharesOutput, __MetadataBearer {} + +/** + *

                                                                      List the lens shares associated with the lens.
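A paging sketch for the new list operation follows. LensAlias, MaxResults, NextToken, and LensShareSummaries are assumed members following the usual list-operation shape; ListLensSharesInput and ListLensSharesOutput are not expanded in this hunk, and the Region is a placeholder.

```ts
// Sketch only: page through all shares for one lens.
import { WellArchitectedClient, ListLensSharesCommand } from "@aws-sdk/client-wellarchitected";

async function listLensShares(lensAlias: string): Promise<void> {
  const client = new WellArchitectedClient({ region: "us-east-1" }); // placeholder Region
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new ListLensSharesCommand({ LensAlias: lensAlias, MaxResults: 50, NextToken: nextToken })
    );
    for (const share of page.LensShareSummaries ?? []) { // assumed output member
      console.log(share);
    }
    nextToken = page.NextToken; // assumed output member
  } while (nextToken);
}
```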

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WellArchitectedClient, ListLensSharesCommand } from "@aws-sdk/client-wellarchitected"; // ES Modules import + * // const { WellArchitectedClient, ListLensSharesCommand } = require("@aws-sdk/client-wellarchitected"); // CommonJS import + * const client = new WellArchitectedClient(config); + * const command = new ListLensSharesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListLensSharesCommandInput} for command's `input` shape. + * @see {@link ListLensSharesCommandOutput} for command's `response` shape. + * @see {@link WellArchitectedClientResolvedConfig | config} for WellArchitectedClient's `config` shape. + * + */ +export class ListLensSharesCommand extends $Command< + ListLensSharesCommandInput, + ListLensSharesCommandOutput, + WellArchitectedClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListLensSharesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WellArchitectedClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WellArchitectedClient"; + const commandName = "ListLensSharesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListLensSharesInput.filterSensitiveLog, + outputFilterSensitiveLog: ListLensSharesOutput.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListLensSharesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListLensSharesCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListLensSharesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-wellarchitected/src/commands/index.ts b/clients/client-wellarchitected/src/commands/index.ts index a9ed1d5ab5af..98d79c017976 100644 --- a/clients/client-wellarchitected/src/commands/index.ts +++ b/clients/client-wellarchitected/src/commands/index.ts @@ -1,19 +1,27 @@ export * from "./AssociateLensesCommand"; +export * from "./CreateLensShareCommand"; +export * from "./CreateLensVersionCommand"; export * from "./CreateMilestoneCommand"; export * from "./CreateWorkloadCommand"; export * from "./CreateWorkloadShareCommand"; +export * from "./DeleteLensCommand"; +export * from "./DeleteLensShareCommand"; export * from "./DeleteWorkloadCommand"; export * from "./DeleteWorkloadShareCommand"; export * from "./DisassociateLensesCommand"; +export * from "./ExportLensCommand"; export * from "./GetAnswerCommand"; +export * from "./GetLensCommand"; export * from "./GetLensReviewCommand"; export * from 
"./GetLensReviewReportCommand"; export * from "./GetLensVersionDifferenceCommand"; export * from "./GetMilestoneCommand"; export * from "./GetWorkloadCommand"; +export * from "./ImportLensCommand"; export * from "./ListAnswersCommand"; export * from "./ListLensReviewImprovementsCommand"; export * from "./ListLensReviewsCommand"; +export * from "./ListLensSharesCommand"; export * from "./ListLensesCommand"; export * from "./ListMilestonesCommand"; export * from "./ListNotificationsCommand"; diff --git a/clients/client-wellarchitected/src/models/models_0.ts b/clients/client-wellarchitected/src/models/models_0.ts index c85b36d3e22c..526dd8a83157 100644 --- a/clients/client-wellarchitected/src/models/models_0.ts +++ b/clients/client-wellarchitected/src/models/models_0.ts @@ -69,6 +69,30 @@ export namespace ChoiceAnswer { }); } +/** + *

                                                                      The choice content.

                                                                      + */ +export interface ChoiceContent { + /** + *

                                                                      The display text for the choice content.

                                                                      + */ + DisplayText?: string; + + /** + *

                                                                      The URL for the choice content.

                                                                      + */ + Url?: string; +} + +export namespace ChoiceContent { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ChoiceContent): any => ({ + ...obj, + }); +} + /** *

A choice available to answer a question.

                                                                      */ @@ -87,6 +111,16 @@ export interface Choice { *

                                                                      The description of a choice.

                                                                      */ Description?: string; + + /** + *

                                                                      The choice level helpful resource.

                                                                      + */ + HelpfulResource?: ChoiceContent; + + /** + *

                                                                      The choice level improvement plan.

                                                                      + */ + ImprovementPlan?: ChoiceContent; } export namespace Choice { @@ -150,6 +184,11 @@ export interface Answer { */ HelpfulResourceUrl?: string; + /** + *

                                                                      The helpful resource text to be displayed.

                                                                      + */ + HelpfulResourceDisplayText?: string; + /** *

                                                                      List of choices available for a question.

                                                                      */ @@ -291,12 +330,12 @@ export namespace AnswerSummary { */ export interface AssociateLensesInput { /** - *

                                                                      The ID assigned to the workload. This ID is unique within an AWS Region.

                                                                      + *

                                                                      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

                                                                      */ WorkloadId: string | undefined; /** - *

                                                                      List of lens aliases to associate or disassociate with a workload.

                                                                      + *

                                                                      List of lens aliases to associate or disassociate with a workload. Up to 10 lenses can be specified.

                                                                      *

                                                                      Identify a lens using its LensSummary$LensAlias.

                                                                      */ LensAliases: string[] | undefined; @@ -343,7 +382,7 @@ export namespace ConflictException { } /** - *

                                                                      There is a problem with the AWS Well-Architected Tool API service.

                                                                      + *

                                                                      There is a problem with the Well-Architected Tool API service.

                                                                      */ export interface InternalServerException extends __SmithyException, $MetadataBearer { name: "InternalServerException"; @@ -487,6 +526,36 @@ export namespace ValidationException { }); } +/** + *

                                                                      The choice level improvement plan.

                                                                      + */ +export interface ChoiceImprovementPlan { + /** + *

                                                                      The ID of a choice.

                                                                      + */ + ChoiceId?: string; + + /** + *

                                                                      The display text for the improvement plan.

                                                                      + */ + DisplayText?: string; + + /** + *

                                                                      The improvement plan URL for a question.

                                                                      + *

                                                                      This value is only available if the question has been answered.

                                                                      + */ + ImprovementPlanUrl?: string; +} + +export namespace ChoiceImprovementPlan { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ChoiceImprovementPlan): any => ({ + ...obj, + }); +} + /** *

                                                                      A list of choices to be updated.

                                                                      */ @@ -516,20 +585,17 @@ export namespace ChoiceUpdate { }); } -/** - *

                                                                      Input for milestone creation.

                                                                      - */ -export interface CreateMilestoneInput { +export interface CreateLensShareInput { /** - *

                                                                      The ID assigned to the workload. This ID is unique within an AWS Region.

                                                                      + *

                                                                      The alias of the lens, for example, serverless.

                                                                      + *

                                                                      Each lens is identified by its LensSummary$LensAlias.

                                                                      */ - WorkloadId: string | undefined; + LensAlias: string | undefined; /** - *

                                                                      The name of the milestone in a workload.

                                                                      - *

                                                                      Milestone names must be unique within a workload.

                                                                      + *

                                                                      The Amazon Web Services account ID or IAM role with which the workload is shared.

                                                                      */ - MilestoneName: string | undefined; + SharedWith: string | undefined; /** *

                                                                      A unique case-sensitive string used to ensure that this request is idempotent @@ -538,44 +604,35 @@ export interface CreateMilestoneInput { * the same client request token and the same parameters after it has completed * successfully, the result of the original request is returned.

                                                                      * - *

                                                                      This token is listed as required, however, if you do not specify it, the AWS SDKs - * automatically generate one for you. If you are not using the AWS SDK or the AWS CLI, + *

                                                                      This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs + * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, * you must provide this token or the request will fail.

                                                                      *
                                                                      */ ClientRequestToken?: string; } -export namespace CreateMilestoneInput { +export namespace CreateLensShareInput { /** * @internal */ - export const filterSensitiveLog = (obj: CreateMilestoneInput): any => ({ + export const filterSensitiveLog = (obj: CreateLensShareInput): any => ({ ...obj, }); } -/** - *

                                                                      Output of a create milestone call.

                                                                      - */ -export interface CreateMilestoneOutput { - /** - *

                                                                      The ID assigned to the workload. This ID is unique within an AWS Region.

                                                                      - */ - WorkloadId?: string; - +export interface CreateLensShareOutput { /** - *

                                                                      The milestone number.

                                                                      - *

                                                                      A workload can have a maximum of 100 milestones.

                                                                      + *

                                                                      The ID associated with the workload share.

                                                                      */ - MilestoneNumber?: number; + ShareId?: string; } -export namespace CreateMilestoneOutput { +export namespace CreateLensShareOutput { /** * @internal */ - export const filterSensitiveLog = (obj: CreateMilestoneOutput): any => ({ + export const filterSensitiveLog = (obj: CreateLensShareOutput): any => ({ ...obj, }); } @@ -621,6 +678,132 @@ export namespace ServiceQuotaExceededException { }); } +export interface CreateLensVersionInput { + /** + *

                                                                      The alias of the lens, for example, serverless.

                                                                      + *

                                                                      Each lens is identified by its LensSummary$LensAlias.

                                                                      + */ + LensAlias: string | undefined; + + /** + *

                                                                      The version of the lens being created.

                                                                      + */ + LensVersion: string | undefined; + + /** + *

Set to true if this is a major lens version.

                                                                      + */ + IsMajorVersion?: boolean; + + /** + *

                                                                      A unique case-sensitive string used to ensure that this request is idempotent + * (executes only once).

                                                                      + *

                                                                      You should not reuse the same token for other requests. If you retry a request with + * the same client request token and the same parameters after it has completed + * successfully, the result of the original request is returned.

                                                                      + * + *

                                                                      This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs + * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, + * you must provide this token or the request will fail.

                                                                      + *
                                                                      + */ + ClientRequestToken?: string; +} + +export namespace CreateLensVersionInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateLensVersionInput): any => ({ + ...obj, + }); +} + +export interface CreateLensVersionOutput { + /** + *

                                                                      The ARN for the lens.

                                                                      + */ + LensArn?: string; + + /** + *

                                                                      The version of the lens.

                                                                      + */ + LensVersion?: string; +} + +export namespace CreateLensVersionOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateLensVersionOutput): any => ({ + ...obj, + }); +} + +/** + *

                                                                      Input for milestone creation.

                                                                      + */ +export interface CreateMilestoneInput { + /** + *

                                                                      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

                                                                      + */ + WorkloadId: string | undefined; + + /** + *

                                                                      The name of the milestone in a workload.

                                                                      + *

                                                                      Milestone names must be unique within a workload.

                                                                      + */ + MilestoneName: string | undefined; + + /** + *

                                                                      A unique case-sensitive string used to ensure that this request is idempotent + * (executes only once).

                                                                      + *

                                                                      You should not reuse the same token for other requests. If you retry a request with + * the same client request token and the same parameters after it has completed + * successfully, the result of the original request is returned.

                                                                      + * + *

                                                                      This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs + * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, + * you must provide this token or the request will fail.

                                                                      + *
                                                                      + */ + ClientRequestToken?: string; +} + +export namespace CreateMilestoneInput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateMilestoneInput): any => ({ + ...obj, + }); +} + +/** + *

                                                                      Output of a create milestone call.

                                                                      + */ +export interface CreateMilestoneOutput { + /** + *

                                                                      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

                                                                      + */ + WorkloadId?: string; + + /** + *

                                                                      The milestone number.

                                                                      + *

                                                                      A workload can have a maximum of 100 milestones.

                                                                      + */ + MilestoneNumber?: number; +} + +export namespace CreateMilestoneOutput { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateMilestoneOutput): any => ({ + ...obj, + }); +} + export enum WorkloadEnvironment { PREPRODUCTION = "PREPRODUCTION", PRODUCTION = "PRODUCTION", @@ -632,7 +815,7 @@ export enum WorkloadEnvironment { export interface CreateWorkloadInput { /** *

                                                                      The name of the workload.

                                                                      - *

                                                                      The name must be unique within an account within a Region. Spaces and capitalization + *

                                                                      The name must be unique within an account within an Amazon Web Services Region. Spaces and capitalization * are ignored when checking for uniqueness.

                                                                      */ WorkloadName: string | undefined; @@ -648,18 +831,18 @@ export interface CreateWorkloadInput { Environment: WorkloadEnvironment | string | undefined; /** - *

                                                                      The list of AWS account IDs associated with the workload.

                                                                      + *

                                                                      The list of Amazon Web Services account IDs associated with the workload.

                                                                      */ AccountIds?: string[]; /** - *

                                                                      The list of AWS Regions associated with the workload, for example, + *

                                                                      The list of Amazon Web Services Regions associated with the workload, for example, * us-east-2, or ca-central-1.

                                                                      */ AwsRegions?: string[]; /** - *

                                                                      The list of non-AWS Regions associated with the workload.

                                                                      + *

                                                                      The list of non-Amazon Web Services Regions associated with the workload.

                                                                      */ NonAwsRegions?: string[]; @@ -845,8 +1028,8 @@ export interface CreateWorkloadInput { * the same client request token and the same parameters after it has completed * successfully, the result of the original request is returned.

                                                                      * - *

                                                                      This token is listed as required, however, if you do not specify it, the AWS SDKs - * automatically generate one for you. If you are not using the AWS SDK or the AWS CLI, + *

                                                                      This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs + * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, * you must provide this token or the request will fail.

                                                                      *
                                                                      */ @@ -872,7 +1055,7 @@ export namespace CreateWorkloadInput { */ export interface CreateWorkloadOutput { /** - *

                                                                      The ID assigned to the workload. This ID is unique within an AWS Region.

                                                                      + *

                                                                      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

                                                                      */ WorkloadId?: string; @@ -901,12 +1084,12 @@ export enum PermissionType { */ export interface CreateWorkloadShareInput { /** - *

                                                                      The ID assigned to the workload. This ID is unique within an AWS Region.

                                                                      + *

                                                                      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

                                                                      */ WorkloadId: string | undefined; /** - *

                                                                      The AWS account ID or IAM role with which the workload is shared.

                                                                      + *

                                                                      The Amazon Web Services account ID or IAM role with which the workload is shared.

                                                                      */ SharedWith: string | undefined; @@ -922,8 +1105,8 @@ export interface CreateWorkloadShareInput { * the same client request token and the same parameters after it has completed * successfully, the result of the original request is returned.

                                                                      * - *

                                                                      This token is listed as required, however, if you do not specify it, the AWS SDKs - * automatically generate one for you. If you are not using the AWS SDK or the AWS CLI, + *

                                                                      This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs + * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, * you must provide this token or the request will fail.

                                                                      *
                                                                      */ @@ -944,7 +1127,7 @@ export namespace CreateWorkloadShareInput { */ export interface CreateWorkloadShareOutput { /** - *

                                                                      The ID assigned to the workload. This ID is unique within an AWS Region.

                                                                      + *

                                                                      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

    */
   WorkloadId?: string;
@@ -963,12 +1146,90 @@ export namespace CreateWorkloadShareOutput {
   });
 }
 
+export enum LensStatusType {
+  ALL = "ALL",
+  DRAFT = "DRAFT",
+  PUBLISHED = "PUBLISHED",
+}
+
+export interface DeleteLensInput {
+  /**
+   * The alias of the lens, for example, serverless.
+   * Each lens is identified by its LensSummary$LensAlias.
+   */
+  LensAlias: string | undefined;
+
+  /**
+   * A unique case-sensitive string used to ensure that this request is idempotent
+   * (executes only once). You should not reuse the same token for other requests.
+   * If you retry a request with the same client request token and the same parameters
+   * after it has completed successfully, the result of the original request is returned.
+   * This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs
+   * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI,
+   * you must provide this token or the request will fail.
+   */
+  ClientRequestToken?: string;
+
+  /**
+   * The status of the lens to be deleted.
+   */
+  LensStatus: LensStatusType | string | undefined;
+}
+
+export namespace DeleteLensInput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DeleteLensInput): any => ({
+    ...obj,
+  });
+}
+
+export interface DeleteLensShareInput {
+  /**
+   * The ID associated with the workload share.
+   */
+  ShareId: string | undefined;
+
+  /**
+   * The alias of the lens, for example, serverless.
+   * Each lens is identified by its LensSummary$LensAlias.
+   */
+  LensAlias: string | undefined;
+
+  /**
+   * A unique case-sensitive string used to ensure that this request is idempotent
+   * (executes only once). You should not reuse the same token for other requests.
+   * If you retry a request with the same client request token and the same parameters
+   * after it has completed successfully, the result of the original request is returned.
+   * This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs
+   * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI,
+   * you must provide this token or the request will fail.
+   */
+  ClientRequestToken?: string;
+}
+
+export namespace DeleteLensShareInput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: DeleteLensShareInput): any => ({
+    ...obj,
+  });
+}
+
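The DeleteLensInput and DeleteLensShareInput shapes back the new DeleteLens and DeleteLensShare operations. A minimal usage sketch, assuming the corresponding DeleteLensCommand and DeleteLensShareCommand are generated alongside these shapes in @aws-sdk/client-wellarchitected; the client request token is omitted because the SDK fills it in:

import {
  DeleteLensCommand,
  DeleteLensShareCommand,
  WellArchitectedClient,
} from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({ region: "us-east-1" });

// Revoke one share of a custom lens, then delete the published lens itself.
// The lens alias and share ID are placeholders.
async function removeLens(lensAlias: string, shareId: string): Promise<void> {
  await client.send(new DeleteLensShareCommand({ LensAlias: lensAlias, ShareId: shareId }));
  await client.send(new DeleteLensCommand({ LensAlias: lensAlias, LensStatus: "PUBLISHED" }));
}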
 /**
  * Input for workload deletion.
  */
 export interface DeleteWorkloadInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -979,8 +1240,8 @@ export interface DeleteWorkloadInput {
    * the same client request token and the same parameters after it has completed
    * successfully, the result of the original request is returned.
    *
-   * This token is listed as required, however, if you do not specify it, the AWS SDKs
-   * automatically generate one for you. If you are not using the AWS SDK or the AWS CLI,
+   * This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs
+   * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI,
    * you must provide this token or the request will fail.
    *
    */
@@ -1006,7 +1267,7 @@ export interface DeleteWorkloadShareInput {
   ShareId: string | undefined;
 
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -1017,8 +1278,8 @@ export interface DeleteWorkloadShareInput {
    * the same client request token and the same parameters after it has completed
    * successfully, the result of the original request is returned.
    *
-   * This token is listed as required, however, if you do not specify it, the AWS SDKs
-   * automatically generate one for you. If you are not using the AWS SDK or the AWS CLI,
+   * This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs
+   * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI,
    * you must provide this token or the request will fail.
    *
    */
@@ -1045,12 +1306,12 @@ export enum DifferenceStatus {
  */
 export interface DisassociateLensesInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
   /**
-   * List of lens aliases to associate or disassociate with a workload.
+   * List of lens aliases to associate or disassociate with a workload. Up to 10 lenses can be specified.
    * Identify a lens using its LensSummary$LensAlias.
    */
   LensAliases: string[] | undefined;
@@ -1065,12 +1326,50 @@ export namespace DisassociateLensesInput {
   });
 }
 
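DisassociateLensesInput now documents a cap of 10 lens aliases per request, so larger sets have to be chunked. A sketch under that assumption, using the existing DisassociateLensesCommand:

import { DisassociateLensesCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// Hypothetical helper: detach an arbitrary number of lenses in batches of 10,
// the documented per-request maximum for LensAliases.
async function disassociateAll(workloadId: string, lensAliases: string[]): Promise<void> {
  for (let i = 0; i < lensAliases.length; i += 10) {
    await client.send(
      new DisassociateLensesCommand({
        WorkloadId: workloadId,
        LensAliases: lensAliases.slice(i, i + 10),
      })
    );
  }
}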
+export interface ExportLensInput {
+  /**
+   * The alias of the lens, for example, serverless.
+   * Each lens is identified by its LensSummary$LensAlias.
+   */
+  LensAlias: string | undefined;
+
+  /**
+   * The lens version to be exported.
+   */
+  LensVersion?: string;
+}
+
+export namespace ExportLensInput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: ExportLensInput): any => ({
+    ...obj,
+  });
+}
+
+export interface ExportLensOutput {
+  /**
+   * The JSON for the lens.
+   */
+  LensJSON?: string;
+}
+
+export namespace ExportLensOutput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: ExportLensOutput): any => ({
+    ...obj,
+  });
+}
+
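ExportLensInput/ExportLensOutput return the lens definition as a JSON string. A sketch of saving that document to disk, assuming the ExportLensCommand generated for these shapes:

import { writeFile } from "node:fs/promises";
import { ExportLensCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// Export a specific version of a lens and save its JSON definition locally.
// The version string is a placeholder.
async function exportLens(lensAlias: string): Promise<void> {
  const { LensJSON } = await client.send(
    new ExportLensCommand({ LensAlias: lensAlias, LensVersion: "1.0" })
  );
  await writeFile(`${lensAlias}.json`, LensJSON ?? "");
}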
 /**
  * Input to get answer.
  */
 export interface GetAnswerInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -1106,7 +1405,7 @@ export namespace GetAnswerInput {
  */
 export interface GetAnswerOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -1123,16 +1422,103 @@ export interface GetAnswerOutput {
   LensAlias?: string;
 
   /**
-   * An answer of the question.
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
+  /**
+   * An answer of the question.
+   */
+  Answer?: Answer;
+}
+
+export namespace GetAnswerOutput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: GetAnswerOutput): any => ({
+    ...obj,
+  });
+}
+
+export interface GetLensInput {
+  /**
+   * The alias of the lens, for example, serverless.
+   * Each lens is identified by its LensSummary$LensAlias.
+   */
+  LensAlias: string | undefined;
+
+  /**
+   * The lens version to be retrieved.
+   */
+  LensVersion?: string;
+}
+
+export namespace GetLensInput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: GetLensInput): any => ({
+    ...obj,
+  });
+}
+
+/**
+ * A lens return object.
+ */
+export interface Lens {
+  /**
+   * The ARN of a lens.
+   */
+  LensArn?: string;
+
+  /**
+   * The version of a lens.
+   */
+  LensVersion?: string;
+
+  /**
+   * The full name of the lens.
+   */
+  Name?: string;
+
+  /**
+   * The description of the lens.
+   */
+  Description?: string;
+
+  /**
+   * The Amazon Web Services account ID that owns the lens.
+   */
+  Owner?: string;
+
+  /**
+   * The ID assigned to the share invitation.
+   */
+  ShareInvitationId?: string;
+}
+
+export namespace Lens {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: Lens): any => ({
+    ...obj,
+  });
+}
+
+export interface GetLensOutput {
+  /**
+   * A lens return object.
    */
-  Answer?: Answer;
+  Lens?: Lens;
 }
 
-export namespace GetAnswerOutput {
+export namespace GetLensOutput {
   /**
    * @internal
    */
-  export const filterSensitiveLog = (obj: GetAnswerOutput): any => ({
+  export const filterSensitiveLog = (obj: GetLensOutput): any => ({
     ...obj,
   });
 }
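GetLensInput and the new Lens shape expose lens metadata (ARN, version, owner). A sketch, assuming the corresponding GetLensCommand in this client:

import { GetLensCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// Fetch the metadata of a custom lens; the alias is a placeholder.
async function describeLens(lensAlias: string): Promise<void> {
  const { Lens } = await client.send(new GetLensCommand({ LensAlias: lensAlias }));
  console.log(Lens?.Name, Lens?.LensArn, Lens?.LensVersion, Lens?.Owner);
}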
@@ -1142,7 +1528,7 @@ export namespace GetAnswerOutput {
  */
 export interface GetLensReviewInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -1170,8 +1556,10 @@ export namespace GetLensReviewInput {
 
 export enum LensStatus {
   CURRENT = "CURRENT",
+  DELETED = "DELETED",
   DEPRECATED = "DEPRECATED",
   NOT_CURRENT = "NOT_CURRENT",
+  UNSHARED = "UNSHARED",
 }
 
 /**
@@ -1219,6 +1607,11 @@ export interface LensReview {
    */
   LensAlias?: string;
 
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
   /**
    * The version of the lens.
    */
@@ -1274,7 +1667,7 @@ export namespace LensReview {
  */
 export interface GetLensReviewOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -1304,7 +1697,7 @@ export namespace GetLensReviewOutput {
  */
 export interface GetLensReviewReportInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -1340,6 +1733,11 @@ export interface LensReviewReport {
    */
   LensAlias?: string;
 
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
   /**
    * The Base64-encoded string representation of a lens review report.
    * This data can be used to create a PDF file.
@@ -1361,7 +1759,7 @@ export namespace LensReviewReport {
  */
 export interface GetLensReviewReportOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -1396,7 +1794,12 @@ export interface GetLensVersionDifferenceInput {
   /**
    * The base version of the lens.
    */
-  BaseLensVersion: string | undefined;
+  BaseLensVersion?: string;
+
+  /**
+   * The lens version to target a difference for.
+   */
+  TargetLensVersion?: string;
 }
 
 export namespace GetLensVersionDifferenceInput {
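With BaseLensVersion now optional and TargetLensVersion added, a version difference can be requested between two explicit versions. A sketch, assuming the GetLensVersionDifferenceCommand and using only output fields that appear in this diff:

import { GetLensVersionDifferenceCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// Compare two versions of a lens; both version strings are placeholders.
async function diffLensVersions(lensAlias: string): Promise<void> {
  const out = await client.send(
    new GetLensVersionDifferenceCommand({
      LensAlias: lensAlias,
      BaseLensVersion: "1.0",
      TargetLensVersion: "2.0",
    })
  );
  console.log(out.LensArn, out.BaseLensVersion, out.TargetLensVersion);
}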
@@ -1447,6 +1850,11 @@ export interface PillarDifference {
    */
   PillarId?: string;
 
+  /**
+   * The name of the pillar.
+   */
+  PillarName?: string;
+
   /**
    * Indicates the type of change to the pillar.
    */
@@ -1493,11 +1901,21 @@ export interface GetLensVersionDifferenceOutput {
    */
   LensAlias?: string;
 
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
   /**
    * The base version of the lens.
    */
   BaseLensVersion?: string;
 
+  /**
+   * The target lens version for the lens.
+   */
+  TargetLensVersion?: string;
+
   /**
    * The latest version of the lens.
    */
@@ -1523,7 +1941,7 @@ export namespace GetLensVersionDifferenceOutput {
  */
 export interface GetMilestoneInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -1556,7 +1974,7 @@ export enum WorkloadImprovementStatus {
  */
 export interface Workload {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -1567,7 +1985,7 @@ export interface Workload {
   /**
    * The name of the workload.
-   * The name must be unique within an account within a Region. Spaces and capitalization
+   * The name must be unique within an account within an Amazon Web Services Region. Spaces and capitalization
    * are ignored when checking for uniqueness.
    */
   WorkloadName?: string;
@@ -1588,18 +2006,18 @@ export interface Workload {
   UpdatedAt?: Date;
 
   /**
-   * The list of AWS account IDs associated with the workload.
+   * The list of Amazon Web Services account IDs associated with the workload.
    */
   AccountIds?: string[];
 
   /**
-   * The list of AWS Regions associated with the workload, for example,
+   * The list of Amazon Web Services Regions associated with the workload, for example,
    * us-east-2, or ca-central-1.
    */
   AwsRegions?: string[];
 
   /**
-   * The list of non-AWS Regions associated with the workload.
+   * The list of non-Amazon Web Services Regions associated with the workload.
    */
   NonAwsRegions?: string[];
 
@@ -1803,7 +2221,7 @@ export interface Workload {
   Lenses?: string[];
 
   /**
-   * An AWS account ID.
+   * An Amazon Web Services account ID.
    */
   Owner?: string;
 
@@ -1868,7 +2286,7 @@ export namespace Milestone {
  */
 export interface GetMilestoneOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -1892,7 +2310,7 @@ export namespace GetMilestoneOutput {
  */
 export interface GetWorkloadInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 }
@@ -1925,6 +2343,74 @@ export namespace GetWorkloadOutput {
   });
 }
 
+export interface ImportLensInput {
+  /**
+   * The alias of the lens, for example, serverless.
+   * Each lens is identified by its LensSummary$LensAlias.
+   */
+  LensAlias?: string;
+
+  /**
+   * The JSON representation of a lens.
+   */
+  JSONString: string | undefined;
+
+  /**
+   * A unique case-sensitive string used to ensure that this request is idempotent
+   * (executes only once). You should not reuse the same token for other requests.
+   * If you retry a request with the same client request token and the same parameters
+   * after it has completed successfully, the result of the original request is returned.
+   * This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs
+   * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI,
+   * you must provide this token or the request will fail.
+   */
+  ClientRequestToken?: string;
+
+  /**
+   * Tags to associate to a lens.
+   */
+  Tags?: { [key: string]: string };
+}
+
+export namespace ImportLensInput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: ImportLensInput): any => ({
+    ...obj,
+  });
+}
+
+export enum ImportLensStatus {
+  COMPLETE = "COMPLETE",
+  ERROR = "ERROR",
+  IN_PROGRESS = "IN_PROGRESS",
+}
+
+export interface ImportLensOutput {
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
+  /**
+   * The status of the imported lens.
+   */
+  Status?: ImportLensStatus | string;
+}
+
+export namespace ImportLensOutput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: ImportLensOutput): any => ({
+    ...obj,
+  });
+}
+
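ImportLensInput takes the lens definition as a JSON string and ImportLensOutput reports an ImportLensStatus. A sketch that reads the definition from disk, assuming the ImportLensCommand; the tag is a placeholder:

import { readFile } from "node:fs/promises";
import { ImportLensCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// Create a new custom lens from a local JSON definition; passing an existing
// LensAlias would update that lens instead.
async function importLens(path: string): Promise<void> {
  const { LensArn, Status } = await client.send(
    new ImportLensCommand({
      JSONString: await readFile(path, "utf8"),
      Tags: { team: "architecture" },
    })
  );
  console.log(LensArn, Status); // Status is COMPLETE, IN_PROGRESS, or ERROR
}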
 /**
  * An improvement summary of a lens review in a workload.
  */
@@ -1955,6 +2441,11 @@ export interface ImprovementSummary {
    * This value is only available if the question has been answered.
    */
   ImprovementPlanUrl?: string;
+
+  /**
+   * The improvement plan details.
+   */
+  ImprovementPlans?: ChoiceImprovementPlan[];
 }
 
 export namespace ImprovementSummary {
@@ -1976,6 +2467,11 @@ export interface LensReviewSummary {
    */
   LensAlias?: string;
 
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
   /**
    * The version of the lens.
    */
@@ -2011,10 +2507,58 @@ export namespace LensReviewSummary {
   });
 }
 
+export enum ShareStatus {
+  ACCEPTED = "ACCEPTED",
+  EXPIRED = "EXPIRED",
+  PENDING = "PENDING",
+  REJECTED = "REJECTED",
+  REVOKED = "REVOKED",
+}
+
+/**
+ * A lens share summary return object.
+ */
+export interface LensShareSummary {
+  /**
+   * The ID associated with the workload share.
+   */
+  ShareId?: string;
+
+  /**
+   * The Amazon Web Services account ID or IAM role with which the workload is shared.
+   */
+  SharedWith?: string;
+
+  /**
+   * The status of a workload share.
+   */
+  Status?: ShareStatus | string;
+}
+
+export namespace LensShareSummary {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: LensShareSummary): any => ({
+    ...obj,
+  });
+}
+
+export enum LensType {
+  AWS_OFFICIAL = "AWS_OFFICIAL",
+  CUSTOM_SELF = "CUSTOM_SELF",
+  CUSTOM_SHARED = "CUSTOM_SHARED",
+}
+
 /**
  * A lens summary of a lens.
  */
 export interface LensSummary {
+  /**
+   * The ARN of the lens.
+   */
+  LensArn?: string;
+
   /**
    * The alias of the lens, for example, serverless.
    * Each lens is identified by its LensSummary$LensAlias.
@@ -2022,19 +2566,44 @@ export interface LensSummary {
   LensAlias?: string;
 
   /**
-   * The version of the lens.
+   * The full name of the lens.
    */
-  LensVersion?: string;
+  LensName?: string;
 
   /**
-   * The full name of the lens.
+   * The type of the lens.
    */
-  LensName?: string;
+  LensType?: LensType | string;
 
   /**
    * The description of the lens.
    */
   Description?: string;
+
+  /**
+   * The date and time recorded.
+   */
+  CreatedAt?: Date;
+
+  /**
+   * The date and time recorded.
+   */
+  UpdatedAt?: Date;
+
+  /**
+   * The version of the lens.
+   */
+  LensVersion?: string;
+
+  /**
+   * An Amazon Web Services account ID.
+   */
+  Owner?: string;
+
+  /**
+   * The status of the lens.
+   */
+  LensStatus?: LensStatus | string;
 }
 
 export namespace LensSummary {
@@ -2051,13 +2620,13 @@ export namespace LensSummary {
  */
 export interface LensUpgradeSummary {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
   /**
    * The name of the workload.
-   * The name must be unique within an account within a Region. Spaces and capitalization
+   * The name must be unique within an account within an Amazon Web Services Region. Spaces and capitalization
    * are ignored when checking for uniqueness.
    */
   WorkloadName?: string;
@@ -2068,6 +2637,11 @@ export interface LensUpgradeSummary {
    */
   LensAlias?: string;
 
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
   /**
    * The current version of the lens.
    */
@@ -2093,7 +2667,7 @@ export namespace LensUpgradeSummary {
  */
 export interface ListAnswersInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -2140,7 +2714,7 @@ export namespace ListAnswersInput {
  */
 export interface ListAnswersOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -2156,6 +2730,11 @@ export interface ListAnswersOutput {
    */
   LensAlias?: string;
 
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
   /**
    * List of answer summaries of lens review in a workload.
    */
@@ -2189,6 +2768,21 @@ export interface ListLensesInput {
    * The maximum number of results to return for this request.
    */
   MaxResults?: number;
+
+  /**
+   * The type of lenses to be returned.
+   */
+  LensType?: LensType | string;
+
+  /**
+   * The status of lenses to be returned.
+   */
+  LensStatus?: LensStatusType | string;
+
+  /**
+   * The full name of the lens.
+   */
+  LensName?: string;
 }
 
 export namespace ListLensesInput {
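ListLensesInput gains LensType, LensStatus, and LensName filters. A sketch, assuming the ListLensesCommand and a LensSummaries field on its output shape:

import { ListLensesCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// List only published custom lenses owned by this account, using the new filters.
async function listCustomLenses(): Promise<void> {
  const { LensSummaries } = await client.send(
    new ListLensesCommand({ LensType: "CUSTOM_SELF", LensStatus: "PUBLISHED" })
  );
  for (const lens of LensSummaries ?? []) {
    console.log(lens.LensName, lens.LensArn, lens.LensVersion, lens.LensStatus);
  }
}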
@@ -2229,7 +2823,7 @@ export namespace ListLensesOutput {
  */
 export interface ListLensReviewImprovementsInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -2276,7 +2870,7 @@ export namespace ListLensReviewImprovementsInput {
  */
 export interface ListLensReviewImprovementsOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -2292,6 +2886,11 @@ export interface ListLensReviewImprovementsOutput {
    */
   LensAlias?: string;
 
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
   /**
    * List of improvement summaries of lens review in a workload.
    */
@@ -2317,7 +2916,7 @@ export interface ListLensReviewsInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -2352,7 +2951,7 @@ export namespace ListLensReviewsInput {
  */
 export interface ListLensReviewsOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -2382,12 +2981,65 @@ export namespace ListLensReviewsOutput {
   });
 }
 
+export interface ListLensSharesInput {
+  /**
+   * The alias of the lens, for example, serverless.
+   * Each lens is identified by its LensSummary$LensAlias.
+   */
+  LensAlias: string | undefined;
+
+  /**
+   * The Amazon Web Services account ID or IAM role with which the lens is shared.
+   */
+  SharedWithPrefix?: string;
+
+  /**
+   * The token to use to retrieve the next set of results.
+   */
+  NextToken?: string;
+
+  /**
+   * The maximum number of results to return for this request.
+   */
+  MaxResults?: number;
+}
+
+export namespace ListLensSharesInput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: ListLensSharesInput): any => ({
+    ...obj,
+  });
+}
+
+export interface ListLensSharesOutput {
+  /**
+   * A list of lens share summaries.
+   */
+  LensShareSummaries?: LensShareSummary[];
+
+  /**
+   * The token to use to retrieve the next set of results.
+   */
+  NextToken?: string;
+}
+
+export namespace ListLensSharesOutput {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: ListLensSharesOutput): any => ({
+    ...obj,
+  });
+}
+
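ListLensSharesInput/Output back share auditing for a custom lens. A paging sketch, assuming the ListLensSharesCommand generated for these shapes:

import { ListLensSharesCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// Page through every share of a custom lens and report its status
// (ACCEPTED, PENDING, REJECTED, REVOKED, or EXPIRED).
async function auditLensShares(lensAlias: string): Promise<void> {
  let NextToken: string | undefined;
  do {
    const page = await client.send(
      new ListLensSharesCommand({ LensAlias: lensAlias, NextToken, MaxResults: 50 })
    );
    for (const share of page.LensShareSummaries ?? []) {
      console.log(share.ShareId, share.SharedWith, share.Status);
    }
    NextToken = page.NextToken;
  } while (NextToken);
}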
 /**
  * Input to list all milestones for a workload.
  */
 export interface ListMilestonesInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -2416,7 +3068,7 @@ export namespace ListMilestonesInput {
  */
 export interface WorkloadSummary {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -2427,13 +3079,13 @@ export interface WorkloadSummary {
   /**
    * The name of the workload.
-   * The name must be unique within an account within a Region. Spaces and capitalization
+   * The name must be unique within an account within an Amazon Web Services Region. Spaces and capitalization
    * are ignored when checking for uniqueness.
    */
   WorkloadName?: string;
 
   /**
-   * An AWS account ID.
+   * An Amazon Web Services account ID.
    */
   Owner?: string;
 
@@ -2508,7 +3160,7 @@ export namespace MilestoneSummary {
  */
 export interface ListMilestonesOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -2534,7 +3186,7 @@ export namespace ListMilestonesOutput {
 
 export interface ListNotificationsInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -2608,6 +3260,11 @@ export namespace ListNotificationsOutput {
   });
 }
 
+export enum ShareResourceType {
+  LENS = "LENS",
+  WORKLOAD = "WORKLOAD",
+}
+
 /**
  * Input for List Share Invitations
  */
@@ -2618,6 +3275,16 @@ export interface ListShareInvitationsInput {
    */
   WorkloadNamePrefix?: string;
 
+  /**
+   * An optional string added to the beginning of each lens name returned in the results.
+   */
+  LensNamePrefix?: string;
+
+  /**
+   * The type of share invitations to be returned.
+   */
+  ShareResourceType?: ShareResourceType | string;
+
   /**
    * The token to use to retrieve the next set of results.
    */
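ListShareInvitationsInput can now be filtered to lens invitations. A sketch, assuming the ListShareInvitationsCommand and a ShareInvitationSummaries field on its output shape:

import { ListShareInvitationsCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// List only lens share invitations whose lens name starts with "example".
async function listLensInvitations(): Promise<void> {
  const { ShareInvitationSummaries } = await client.send(
    new ListShareInvitationsCommand({ ShareResourceType: "LENS", LensNamePrefix: "example" })
  );
  for (const invite of ShareInvitationSummaries ?? []) {
    console.log(invite.ShareInvitationId, invite.LensName, invite.SharedBy);
  }
}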
@@ -2648,12 +3315,12 @@ export interface ShareInvitationSummary {
   ShareInvitationId?: string;
 
   /**
-   * An AWS account ID.
+   * An Amazon Web Services account ID.
    */
   SharedBy?: string;
 
   /**
-   * The AWS account ID or IAM role with which the workload is shared.
+   * The Amazon Web Services account ID or IAM role with which the workload is shared.
    */
   SharedWith?: string;
 
@@ -2662,17 +3329,32 @@ export interface ShareInvitationSummary {
    */
   PermissionType?: PermissionType | string;
 
+  /**
+   * The resource type of the share invitation.
+   */
+  ShareResourceType?: ShareResourceType | string;
+
   /**
    * The name of the workload.
-   * The name must be unique within an account within a Region. Spaces and capitalization
+   * The name must be unique within an account within an Amazon Web Services Region. Spaces and capitalization
    * are ignored when checking for uniqueness.
    */
   WorkloadName?: string;
 
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
+
+  /**
+   * The full name of the lens.
+   */
+  LensName?: string;
+
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
 }
 
 export namespace ShareInvitationSummary {
@@ -2799,12 +3481,12 @@ export namespace ListWorkloadsOutput {
  */
 export interface ListWorkloadSharesInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
   /**
-   * The AWS account ID or IAM role with which the workload is shared.
+   * The Amazon Web Services account ID or IAM role with which the workload is shared.
    */
   SharedWithPrefix?: string;
 
@@ -2828,14 +3510,6 @@ export namespace ListWorkloadSharesInput {
   });
 }
 
-export enum ShareStatus {
-  ACCEPTED = "ACCEPTED",
-  EXPIRED = "EXPIRED",
-  PENDING = "PENDING",
-  REJECTED = "REJECTED",
-  REVOKED = "REVOKED",
-}
-
 /**
  * A workload share summary return object.
  */
@@ -2846,7 +3520,7 @@ export interface WorkloadShareSummary {
   ShareId?: string;
 
   /**
-   * The AWS account ID or IAM role with which the workload is shared.
+   * The Amazon Web Services account ID or IAM role with which the workload is shared.
    */
   SharedWith?: string;
 
@@ -2875,7 +3549,7 @@ export namespace WorkloadShareSummary {
  */
 export interface ListWorkloadSharesOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -2909,9 +3583,25 @@ export interface ShareInvitation {
   ShareInvitationId?: string;
 
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The resource type of the share invitation.
+   */
+  ShareResourceType?: ShareResourceType | string;
+
+  /**
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
+
+  /**
+   * The alias of the lens, for example, serverless.
+   * Each lens is identified by its LensSummary$LensAlias.
+   */
+  LensAlias?: string;
+
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
 }
 
 export namespace ShareInvitation {
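Because ShareInvitation now carries ShareResourceType, LensAlias, and LensArn, an accepted invitation may refer to a lens rather than a workload. A sketch, assuming the existing UpdateShareInvitationCommand and its ShareInvitationAction parameter (neither appears in this hunk):

import { UpdateShareInvitationCommand, WellArchitectedClient } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});

// Accept a pending invitation; the returned ShareInvitation indicates whether a
// LENS or a WORKLOAD was shared, and for lenses includes the LensArn.
async function acceptInvitation(shareInvitationId: string): Promise<void> {
  const { ShareInvitation } = await client.send(
    new UpdateShareInvitationCommand({
      ShareInvitationId: shareInvitationId,
      ShareInvitationAction: "ACCEPT",
    })
  );
  if (ShareInvitation?.ShareResourceType === "LENS") {
    console.log("Accepted lens share:", ShareInvitation.LensArn);
  } else {
    console.log("Accepted workload share:", ShareInvitation?.WorkloadId);
  }
}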
@@ -2998,7 +3688,7 @@ export namespace UntagResourceOutput {
  */
 export interface UpdateAnswerInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -3055,7 +3745,7 @@ export namespace UpdateAnswerInput {
  */
 export interface UpdateAnswerOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -3065,6 +3755,11 @@ export interface UpdateAnswerOutput {
    */
   LensAlias?: string;
 
+  /**
+   * The ARN for the lens.
+   */
+  LensArn?: string;
+
   /**
    * An answer of the question.
    */
@@ -3085,7 +3780,7 @@ export namespace UpdateAnswerOutput {
  */
 export interface UpdateLensReviewInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -3120,7 +3815,7 @@ export namespace UpdateLensReviewInput {
  */
 export interface UpdateLensReviewOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 
@@ -3184,13 +3879,13 @@ export namespace UpdateShareInvitationOutput {
  */
 export interface UpdateWorkloadInput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
   /**
    * The name of the workload.
-   * The name must be unique within an account within a Region. Spaces and capitalization
+   * The name must be unique within an account within an Amazon Web Services Region. Spaces and capitalization
    * are ignored when checking for uniqueness.
    */
   WorkloadName?: string;
@@ -3206,18 +3901,18 @@ export interface UpdateWorkloadInput {
   Environment?: WorkloadEnvironment | string;
 
   /**
-   * The list of AWS account IDs associated with the workload.
+   * The list of Amazon Web Services account IDs associated with the workload.
    */
   AccountIds?: string[];
 
   /**
-   * The list of AWS Regions associated with the workload, for example,
+   * The list of Amazon Web Services Regions associated with the workload, for example,
    * us-east-2, or ca-central-1.
    */
   AwsRegions?: string[];
 
   /**
-   * The list of non-AWS Regions associated with the workload.
+   * The list of non-Amazon Web Services Regions associated with the workload.
    */
   NonAwsRegions?: string[];
 
@@ -3444,7 +4139,7 @@ export interface UpdateWorkloadShareInput {
   ShareId: string | undefined;
 
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId: string | undefined;
 
@@ -3473,12 +4168,12 @@ export interface WorkloadShare {
   ShareId?: string;
 
   /**
-   * An AWS account ID.
+   * An Amazon Web Services account ID.
    */
   SharedBy?: string;
 
   /**
-   * The AWS account ID or IAM role with which the workload is shared.
+   * The Amazon Web Services account ID or IAM role with which the workload is shared.
    */
   SharedWith?: string;
 
@@ -3494,13 +4189,13 @@ export interface WorkloadShare {
   /**
    * The name of the workload.
-   * The name must be unique within an account within a Region. Spaces and capitalization
+   * The name must be unique within an account within an Amazon Web Services Region. Spaces and capitalization
    * are ignored when checking for uniqueness.
    */
   WorkloadName?: string;
 
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.
+   * The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
    */
   WorkloadId?: string;
 }
@@ -3519,7 +4214,7 @@ export namespace WorkloadShare {
  */
 export interface UpdateWorkloadShareOutput {
   /**
-   * The ID assigned to the workload. This ID is unique within an AWS Region.

                                                                      + *

                                                                      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

                                                                      */ WorkloadId?: string; @@ -3540,7 +4235,7 @@ export namespace UpdateWorkloadShareOutput { export interface UpgradeLensReviewInput { /** - *

                                                                      The ID assigned to the workload. This ID is unique within an AWS Region.

                                                                      + *

                                                                      The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.

                                                                      */ WorkloadId: string | undefined; @@ -3563,8 +4258,8 @@ export interface UpgradeLensReviewInput { * the same client request token and the same parameters after it has completed * successfully, the result of the original request is returned.

                                                                      * - *

                                                                      This token is listed as required, however, if you do not specify it, the AWS SDKs - * automatically generate one for you. If you are not using the AWS SDK or the AWS CLI, + *

                                                                      This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs + * automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI, * you must provide this token or the request will fail.

                                                                      *
    */
diff --git a/clients/client-wellarchitected/src/pagination/ListLensSharesPaginator.ts b/clients/client-wellarchitected/src/pagination/ListLensSharesPaginator.ts
new file mode 100644
index 000000000000..8769873ce9c7
--- /dev/null
+++ b/clients/client-wellarchitected/src/pagination/ListLensSharesPaginator.ts
@@ -0,0 +1,59 @@
+import { Paginator } from "@aws-sdk/types";
+
+import {
+  ListLensSharesCommand,
+  ListLensSharesCommandInput,
+  ListLensSharesCommandOutput,
+} from "../commands/ListLensSharesCommand";
+import { WellArchitected } from "../WellArchitected";
+import { WellArchitectedClient } from "../WellArchitectedClient";
+import { WellArchitectedPaginationConfiguration } from "./Interfaces";
+
+/**
+ * @private
+ */
+const makePagedClientRequest = async (
+  client: WellArchitectedClient,
+  input: ListLensSharesCommandInput,
+  ...args: any
+): Promise<ListLensSharesCommandOutput> => {
+  // @ts-ignore
+  return await client.send(new ListLensSharesCommand(input), ...args);
+};
+/**
+ * @private
+ */
+const makePagedRequest = async (
+  client: WellArchitected,
+  input: ListLensSharesCommandInput,
+  ...args: any
+): Promise<ListLensSharesCommandOutput> => {
+  // @ts-ignore
+  return await client.listLensShares(input, ...args);
+};
+export async function* paginateListLensShares(
+  config: WellArchitectedPaginationConfiguration,
+  input: ListLensSharesCommandInput,
+  ...additionalArguments: any
+): Paginator<ListLensSharesCommandOutput> {
+  // ToDo: replace with actual type instead of typeof input.NextToken
+  let token: typeof input.NextToken | undefined = config.startingToken || undefined;
+  let hasNext = true;
+  let page: ListLensSharesCommandOutput;
+  while (hasNext) {
+    input.NextToken = token;
+    input["MaxResults"] = config.pageSize;
+    if (config.client instanceof WellArchitected) {
+      page = await makePagedRequest(config.client, input, ...additionalArguments);
+    } else if (config.client instanceof WellArchitectedClient) {
+      page = await makePagedClientRequest(config.client, input, ...additionalArguments);
+    } else {
+      throw new Error("Invalid client, expected WellArchitected | WellArchitectedClient");
+    }
+    yield page;
+    token = page.NextToken;
+    hasNext = !!token;
+  }
+  // @ts-ignore
+  return undefined;
+}
diff --git a/clients/client-wellarchitected/src/pagination/index.ts b/clients/client-wellarchitected/src/pagination/index.ts
index a47e2048433f..1d76ec31630f 100644
--- a/clients/client-wellarchitected/src/pagination/index.ts
+++ b/clients/client-wellarchitected/src/pagination/index.ts
@@ -2,6 +2,7 @@ export * from "./Interfaces";
 export * from "./ListAnswersPaginator";
 export * from "./ListLensReviewImprovementsPaginator";
 export * from "./ListLensReviewsPaginator";
+export * from "./ListLensSharesPaginator";
 export * from "./ListLensesPaginator";
 export * from "./ListMilestonesPaginator";
 export * from "./ListNotificationsPaginator";
diff --git a/clients/client-wellarchitected/src/protocols/Aws_restJson1.ts b/clients/client-wellarchitected/src/protocols/Aws_restJson1.ts
index ffa72feb3ab1..259a3919666a 100644
--- a/clients/client-wellarchitected/src/protocols/Aws_restJson1.ts
+++ b/clients/client-wellarchitected/src/protocols/Aws_restJson1.ts
@@ -19,19 +19,25 @@ import {
 import { v4 as generateIdempotencyToken } from "uuid";

 import { AssociateLensesCommandInput, AssociateLensesCommandOutput } from "../commands/AssociateLensesCommand";
+import { CreateLensShareCommandInput, CreateLensShareCommandOutput } from "../commands/CreateLensShareCommand";
+import { CreateLensVersionCommandInput,
CreateLensVersionCommandOutput } from "../commands/CreateLensVersionCommand"; import { CreateMilestoneCommandInput, CreateMilestoneCommandOutput } from "../commands/CreateMilestoneCommand"; import { CreateWorkloadCommandInput, CreateWorkloadCommandOutput } from "../commands/CreateWorkloadCommand"; import { CreateWorkloadShareCommandInput, CreateWorkloadShareCommandOutput, } from "../commands/CreateWorkloadShareCommand"; +import { DeleteLensCommandInput, DeleteLensCommandOutput } from "../commands/DeleteLensCommand"; +import { DeleteLensShareCommandInput, DeleteLensShareCommandOutput } from "../commands/DeleteLensShareCommand"; import { DeleteWorkloadCommandInput, DeleteWorkloadCommandOutput } from "../commands/DeleteWorkloadCommand"; import { DeleteWorkloadShareCommandInput, DeleteWorkloadShareCommandOutput, } from "../commands/DeleteWorkloadShareCommand"; import { DisassociateLensesCommandInput, DisassociateLensesCommandOutput } from "../commands/DisassociateLensesCommand"; +import { ExportLensCommandInput, ExportLensCommandOutput } from "../commands/ExportLensCommand"; import { GetAnswerCommandInput, GetAnswerCommandOutput } from "../commands/GetAnswerCommand"; +import { GetLensCommandInput, GetLensCommandOutput } from "../commands/GetLensCommand"; import { GetLensReviewCommandInput, GetLensReviewCommandOutput } from "../commands/GetLensReviewCommand"; import { GetLensReviewReportCommandInput, @@ -43,6 +49,7 @@ import { } from "../commands/GetLensVersionDifferenceCommand"; import { GetMilestoneCommandInput, GetMilestoneCommandOutput } from "../commands/GetMilestoneCommand"; import { GetWorkloadCommandInput, GetWorkloadCommandOutput } from "../commands/GetWorkloadCommand"; +import { ImportLensCommandInput, ImportLensCommandOutput } from "../commands/ImportLensCommand"; import { ListAnswersCommandInput, ListAnswersCommandOutput } from "../commands/ListAnswersCommand"; import { ListLensesCommandInput, ListLensesCommandOutput } from "../commands/ListLensesCommand"; import { @@ -50,6 +57,7 @@ import { ListLensReviewImprovementsCommandOutput, } from "../commands/ListLensReviewImprovementsCommand"; import { ListLensReviewsCommandInput, ListLensReviewsCommandOutput } from "../commands/ListLensReviewsCommand"; +import { ListLensSharesCommandInput, ListLensSharesCommandOutput } from "../commands/ListLensSharesCommand"; import { ListMilestonesCommandInput, ListMilestonesCommandOutput } from "../commands/ListMilestonesCommand"; import { ListNotificationsCommandInput, ListNotificationsCommandOutput } from "../commands/ListNotificationsCommand"; import { @@ -83,13 +91,17 @@ import { Choice, ChoiceAnswer, ChoiceAnswerSummary, + ChoiceContent, + ChoiceImprovementPlan, ChoiceUpdate, ConflictException, ImprovementSummary, InternalServerException, + Lens, LensReview, LensReviewReport, LensReviewSummary, + LensShareSummary, LensSummary, LensUpgradeSummary, Milestone, @@ -148,6 +160,78 @@ export const serializeAws_restJson1AssociateLensesCommand = async ( }); }; +export const serializeAws_restJson1CreateLensShareCommand = async ( + input: CreateLensShareCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/lenses/{LensAlias}/shares"; + if (input.LensAlias !== undefined) { + const labelValue: string = input.LensAlias; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: LensAlias."); + } + resolvedPath = resolvedPath.replace("{LensAlias}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: LensAlias."); + } + let body: any; + body = JSON.stringify({ + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.SharedWith !== undefined && input.SharedWith !== null && { SharedWith: input.SharedWith }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateLensVersionCommand = async ( + input: CreateLensVersionCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/lenses/{LensAlias}/versions"; + if (input.LensAlias !== undefined) { + const labelValue: string = input.LensAlias; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: LensAlias."); + } + resolvedPath = resolvedPath.replace("{LensAlias}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: LensAlias."); + } + let body: any; + body = JSON.stringify({ + ClientRequestToken: input.ClientRequestToken ?? generateIdempotencyToken(), + ...(input.IsMajorVersion !== undefined && + input.IsMajorVersion !== null && { IsMajorVersion: input.IsMajorVersion }), + ...(input.LensVersion !== undefined && input.LensVersion !== null && { LensVersion: input.LensVersion }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1CreateMilestoneCommand = async ( input: CreateMilestoneCommandInput, context: __SerdeContext @@ -268,6 +352,81 @@ export const serializeAws_restJson1CreateWorkloadShareCommand = async ( }); }; +export const serializeAws_restJson1DeleteLensCommand = async ( + input: DeleteLensCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/lenses/{LensAlias}"; + if (input.LensAlias !== undefined) { + const labelValue: string = input.LensAlias; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: LensAlias."); + } + resolvedPath = resolvedPath.replace("{LensAlias}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: LensAlias."); + } + const query: any = { + ...(input.ClientRequestToken !== undefined && { ClientRequestToken: input.ClientRequestToken }), + ...(input.LensStatus !== undefined && { LensStatus: input.LensStatus }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1DeleteLensShareCommand = async ( + input: DeleteLensShareCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/lenses/{LensAlias}/shares/{ShareId}"; + if (input.ShareId !== undefined) { + const labelValue: string = input.ShareId; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: ShareId."); + } + resolvedPath = resolvedPath.replace("{ShareId}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: ShareId."); + } + if (input.LensAlias !== undefined) { + const labelValue: string = input.LensAlias; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: LensAlias."); + } + resolvedPath = resolvedPath.replace("{LensAlias}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: LensAlias."); + } + const query: any = { + ...(input.ClientRequestToken !== undefined && { ClientRequestToken: input.ClientRequestToken }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1DeleteWorkloadCommand = async ( input: DeleteWorkloadCommandInput, context: __SerdeContext @@ -378,6 +537,39 @@ export const serializeAws_restJson1DisassociateLensesCommand = async ( }); }; +export const serializeAws_restJson1ExportLensCommand = async ( + input: ExportLensCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/lenses/{LensAlias}/export"; + if (input.LensAlias !== undefined) { + const labelValue: string = input.LensAlias; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: LensAlias."); + } + resolvedPath = resolvedPath.replace("{LensAlias}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: LensAlias."); + } + const query: any = { + ...(input.LensVersion !== undefined && { LensVersion: input.LensVersion }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1GetAnswerCommand = async ( input: GetAnswerCommandInput, context: __SerdeContext @@ -430,6 +622,38 @@ export const serializeAws_restJson1GetAnswerCommand = async ( }); }; +export const serializeAws_restJson1GetLensCommand = async ( + input: GetLensCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/lenses/{LensAlias}"; + if (input.LensAlias !== undefined) { + const labelValue: string = input.LensAlias; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: LensAlias."); + } + resolvedPath = resolvedPath.replace("{LensAlias}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: LensAlias."); + } + const query: any = { + ...(input.LensVersion !== undefined && { LensVersion: input.LensVersion }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1GetLensReviewCommand = async ( input: GetLensReviewCommandInput, context: __SerdeContext @@ -535,6 +759,7 @@ export const serializeAws_restJson1GetLensVersionDifferenceCommand = async ( } const query: any = { ...(input.BaseLensVersion !== undefined && { BaseLensVersion: input.BaseLensVersion }), + ...(input.TargetLensVersion !== undefined && { TargetLensVersion: input.TargetLensVersion }), }; let body: any; return new __HttpRequest({ @@ -616,6 +841,33 @@ export const serializeAws_restJson1GetWorkloadCommand = async ( }); }; +export const serializeAws_restJson1ImportLensCommand = async ( + input: ImportLensCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/importLens"; + let body: any; + body = JSON.stringify({ + ClientRequestToken: input.ClientRequestToken ?? 
generateIdempotencyToken(), + ...(input.JSONString !== undefined && input.JSONString !== null && { JSONString: input.JSONString }), + ...(input.LensAlias !== undefined && input.LensAlias !== null && { LensAlias: input.LensAlias }), + ...(input.Tags !== undefined && input.Tags !== null && { Tags: serializeAws_restJson1TagMap(input.Tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + export const serializeAws_restJson1ListAnswersCommand = async ( input: ListAnswersCommandInput, context: __SerdeContext @@ -672,6 +924,9 @@ export const serializeAws_restJson1ListLensesCommand = async ( const query: any = { ...(input.NextToken !== undefined && { NextToken: input.NextToken }), ...(input.MaxResults !== undefined && { MaxResults: input.MaxResults.toString() }), + ...(input.LensType !== undefined && { LensType: input.LensType }), + ...(input.LensStatus !== undefined && { LensStatus: input.LensStatus }), + ...(input.LensName !== undefined && { LensName: input.LensName }), }; let body: any; return new __HttpRequest({ @@ -767,6 +1022,41 @@ export const serializeAws_restJson1ListLensReviewsCommand = async ( }); }; +export const serializeAws_restJson1ListLensSharesCommand = async ( + input: ListLensSharesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/lenses/{LensAlias}/shares"; + if (input.LensAlias !== undefined) { + const labelValue: string = input.LensAlias; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: LensAlias."); + } + resolvedPath = resolvedPath.replace("{LensAlias}", __extendedEncodeURIComponent(labelValue)); + } else { + throw new Error("No value provided for input HTTP label: LensAlias."); + } + const query: any = { + ...(input.SharedWithPrefix !== undefined && { SharedWithPrefix: input.SharedWithPrefix }), + ...(input.NextToken !== undefined && { NextToken: input.NextToken }), + ...(input.MaxResults !== undefined && { MaxResults: input.MaxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + export const serializeAws_restJson1ListMilestonesCommand = async ( input: ListMilestonesCommandInput, context: __SerdeContext @@ -838,6 +1128,8 @@ export const serializeAws_restJson1ListShareInvitationsCommand = async ( const resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/shareInvitations"; const query: any = { ...(input.WorkloadNamePrefix !== undefined && { WorkloadNamePrefix: input.WorkloadNamePrefix }), + ...(input.LensNamePrefix !== undefined && { LensNamePrefix: input.LensNamePrefix }), + ...(input.ShareResourceType !== undefined && { ShareResourceType: input.ShareResourceType }), ...(input.NextToken !== undefined && { NextToken: input.NextToken }), ...(input.MaxResults !== undefined && { MaxResults: input.MaxResults.toString() }), }; @@ -1393,24 +1685,234 @@ const deserializeAws_restJson1AssociateLensesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_restJson1CreateMilestoneCommand = async ( +export const deserializeAws_restJson1CreateLensShareCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1CreateMilestoneCommandError(output, context); + return deserializeAws_restJson1CreateLensShareCommandError(output, context); } - const contents: CreateMilestoneCommandOutput = { + const contents: CreateLensShareCommandOutput = { $metadata: deserializeMetadata(output), - MilestoneNumber: undefined, - WorkloadId: undefined, + ShareId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.MilestoneNumber !== undefined && data.MilestoneNumber !== null) { - contents.MilestoneNumber = __expectInt32(data.MilestoneNumber); - } - if (data.WorkloadId !== undefined && data.WorkloadId !== null) { - contents.WorkloadId = __expectString(data.WorkloadId); + if (data.ShareId !== undefined && data.ShareId !== null) { + contents.ShareId = __expectString(data.ShareId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateLensShareCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.wellarchitected#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.wellarchitected#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.wellarchitected#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.wellarchitected#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.wellarchitected#ServiceQuotaExceededException": + 
response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.wellarchitected#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.wellarchitected#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateLensVersionCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateLensVersionCommandError(output, context); + } + const contents: CreateLensVersionCommandOutput = { + $metadata: deserializeMetadata(output), + LensArn: undefined, + LensVersion: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.LensArn !== undefined && data.LensArn !== null) { + contents.LensArn = __expectString(data.LensArn); + } + if (data.LensVersion !== undefined && data.LensVersion !== null) { + contents.LensVersion = __expectString(data.LensVersion); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateLensVersionCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.wellarchitected#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.wellarchitected#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.wellarchitected#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.wellarchitected#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), 
+ name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.wellarchitected#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.wellarchitected#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.wellarchitected#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateMilestoneCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateMilestoneCommandError(output, context); + } + const contents: CreateMilestoneCommandOutput = { + $metadata: deserializeMetadata(output), + MilestoneNumber: undefined, + WorkloadId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.MilestoneNumber !== undefined && data.MilestoneNumber !== null) { + contents.MilestoneNumber = __expectInt32(data.MilestoneNumber); + } + if (data.WorkloadId !== undefined && data.WorkloadId !== null) { + contents.WorkloadId = __expectString(data.WorkloadId); } return Promise.resolve(contents); }; @@ -1706,24 +2208,24 @@ const deserializeAws_restJson1CreateWorkloadShareCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_restJson1DeleteWorkloadCommand = async ( +export const deserializeAws_restJson1DeleteLensCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1DeleteWorkloadCommandError(output, context); + return deserializeAws_restJson1DeleteLensCommandError(output, context); } - const contents: DeleteWorkloadCommandOutput = { + const contents: DeleteLensCommandOutput = { $metadata: deserializeMetadata(output), }; await collectBody(output.body, context); return Promise.resolve(contents); }; -const deserializeAws_restJson1DeleteWorkloadCommandError = async ( +const deserializeAws_restJson1DeleteLensCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1797,24 +2299,24 @@ const deserializeAws_restJson1DeleteWorkloadCommandError = async ( return Promise.reject(Object.assign(new 
Error(message), response)); }; -export const deserializeAws_restJson1DeleteWorkloadShareCommand = async ( +export const deserializeAws_restJson1DeleteLensShareCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1DeleteWorkloadShareCommandError(output, context); + return deserializeAws_restJson1DeleteLensShareCommandError(output, context); } - const contents: DeleteWorkloadShareCommandOutput = { + const contents: DeleteLensShareCommandOutput = { $metadata: deserializeMetadata(output), }; await collectBody(output.body, context); return Promise.resolve(contents); }; -const deserializeAws_restJson1DeleteWorkloadShareCommandError = async ( +const deserializeAws_restJson1DeleteLensShareCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1888,24 +2390,24 @@ const deserializeAws_restJson1DeleteWorkloadShareCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_restJson1DisassociateLensesCommand = async ( +export const deserializeAws_restJson1DeleteWorkloadCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1DisassociateLensesCommandError(output, context); + return deserializeAws_restJson1DeleteWorkloadCommandError(output, context); } - const contents: DisassociateLensesCommandOutput = { + const contents: DeleteWorkloadCommandOutput = { $metadata: deserializeMetadata(output), }; await collectBody(output.body, context); return Promise.resolve(contents); }; -const deserializeAws_restJson1DisassociateLensesCommandError = async ( +const deserializeAws_restJson1DeleteWorkloadCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -1979,40 +2481,400 @@ const deserializeAws_restJson1DisassociateLensesCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; -export const deserializeAws_restJson1GetAnswerCommand = async ( +export const deserializeAws_restJson1DeleteWorkloadShareCommand = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { if (output.statusCode !== 200 && output.statusCode >= 300) { - return deserializeAws_restJson1GetAnswerCommandError(output, context); + return deserializeAws_restJson1DeleteWorkloadShareCommandError(output, context); } - const contents: GetAnswerCommandOutput = { + const contents: DeleteWorkloadShareCommandOutput = { $metadata: deserializeMetadata(output), - Answer: undefined, - LensAlias: undefined, - MilestoneNumber: undefined, - WorkloadId: undefined, + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteWorkloadShareCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case 
"AccessDeniedException": + case "com.amazonaws.wellarchitected#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.wellarchitected#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.wellarchitected#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.wellarchitected#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.wellarchitected#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.wellarchitected#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DisassociateLensesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisassociateLensesCommandError(output, context); + } + const contents: DisassociateLensesCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisassociateLensesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.wellarchitected#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.wellarchitected#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + 
$metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.wellarchitected#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.wellarchitected#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.wellarchitected#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.wellarchitected#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ExportLensCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ExportLensCommandError(output, context); + } + const contents: ExportLensCommandOutput = { + $metadata: deserializeMetadata(output), + LensJSON: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.LensJSON !== undefined && data.LensJSON !== null) { + contents.LensJSON = __expectString(data.LensJSON); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ExportLensCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.wellarchitected#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.wellarchitected#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.wellarchitected#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: 
errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.wellarchitected#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.wellarchitected#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetAnswerCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetAnswerCommandError(output, context); + } + const contents: GetAnswerCommandOutput = { + $metadata: deserializeMetadata(output), + Answer: undefined, + LensAlias: undefined, + LensArn: undefined, + MilestoneNumber: undefined, + WorkloadId: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.Answer !== undefined && data.Answer !== null) { + contents.Answer = deserializeAws_restJson1Answer(data.Answer, context); + } + if (data.LensAlias !== undefined && data.LensAlias !== null) { + contents.LensAlias = __expectString(data.LensAlias); + } + if (data.LensArn !== undefined && data.LensArn !== null) { + contents.LensArn = __expectString(data.LensArn); + } + if (data.MilestoneNumber !== undefined && data.MilestoneNumber !== null) { + contents.MilestoneNumber = __expectInt32(data.MilestoneNumber); + } + if (data.WorkloadId !== undefined && data.WorkloadId !== null) { + contents.WorkloadId = __expectString(data.WorkloadId); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetAnswerCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.wellarchitected#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.wellarchitected#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.wellarchitected#ResourceNotFoundException": + response = { + ...(await 
deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.wellarchitected#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.wellarchitected#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetLensCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetLensCommandError(output, context); + } + const contents: GetLensCommandOutput = { + $metadata: deserializeMetadata(output), + Lens: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); - if (data.Answer !== undefined && data.Answer !== null) { - contents.Answer = deserializeAws_restJson1Answer(data.Answer, context); - } - if (data.LensAlias !== undefined && data.LensAlias !== null) { - contents.LensAlias = __expectString(data.LensAlias); - } - if (data.MilestoneNumber !== undefined && data.MilestoneNumber !== null) { - contents.MilestoneNumber = __expectInt32(data.MilestoneNumber); - } - if (data.WorkloadId !== undefined && data.WorkloadId !== null) { - contents.WorkloadId = __expectString(data.WorkloadId); + if (data.Lens !== undefined && data.Lens !== null) { + contents.Lens = deserializeAws_restJson1Lens(data.Lens, context); } return Promise.resolve(contents); }; -const deserializeAws_restJson1GetAnswerCommandError = async ( +const deserializeAws_restJson1GetLensCommandError = async ( output: __HttpResponse, context: __SerdeContext -): Promise => { +): Promise => { const parsedOutput: any = { ...output, body: await parseBody(output.body, context), @@ -2280,6 +3142,8 @@ export const deserializeAws_restJson1GetLensVersionDifferenceCommand = async ( BaseLensVersion: undefined, LatestLensVersion: undefined, LensAlias: undefined, + LensArn: undefined, + TargetLensVersion: undefined, VersionDifferences: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); @@ -2292,6 +3156,12 @@ export const deserializeAws_restJson1GetLensVersionDifferenceCommand = async ( if (data.LensAlias !== undefined && data.LensAlias !== null) { contents.LensAlias = __expectString(data.LensAlias); } + if (data.LensArn !== undefined && data.LensArn !== null) { + contents.LensArn = __expectString(data.LensArn); + } + if (data.TargetLensVersion !== undefined && data.TargetLensVersion !== null) { + contents.TargetLensVersion = 
__expectString(data.TargetLensVersion); + } if (data.VersionDifferences !== undefined && data.VersionDifferences !== null) { contents.VersionDifferences = deserializeAws_restJson1VersionDifferences(data.VersionDifferences, context); } @@ -2545,6 +3415,113 @@ const deserializeAws_restJson1GetWorkloadCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1ImportLensCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ImportLensCommandError(output, context); + } + const contents: ImportLensCommandOutput = { + $metadata: deserializeMetadata(output), + LensArn: undefined, + Status: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.LensArn !== undefined && data.LensArn !== null) { + contents.LensArn = __expectString(data.LensArn); + } + if (data.Status !== undefined && data.Status !== null) { + contents.Status = __expectString(data.Status); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ImportLensCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.wellarchitected#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.wellarchitected#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.wellarchitected#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.wellarchitected#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.wellarchitected#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.wellarchitected#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.wellarchitected#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + 
}; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1ListAnswersCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -2556,6 +3533,7 @@ export const deserializeAws_restJson1ListAnswersCommand = async ( $metadata: deserializeMetadata(output), AnswerSummaries: undefined, LensAlias: undefined, + LensArn: undefined, MilestoneNumber: undefined, NextToken: undefined, WorkloadId: undefined, @@ -2567,6 +3545,9 @@ export const deserializeAws_restJson1ListAnswersCommand = async ( if (data.LensAlias !== undefined && data.LensAlias !== null) { contents.LensAlias = __expectString(data.LensAlias); } + if (data.LensArn !== undefined && data.LensArn !== null) { + contents.LensArn = __expectString(data.LensArn); + } if (data.MilestoneNumber !== undefined && data.MilestoneNumber !== null) { contents.MilestoneNumber = __expectInt32(data.MilestoneNumber); } @@ -2742,6 +3723,7 @@ export const deserializeAws_restJson1ListLensReviewImprovementsCommand = async ( $metadata: deserializeMetadata(output), ImprovementSummaries: undefined, LensAlias: undefined, + LensArn: undefined, MilestoneNumber: undefined, NextToken: undefined, WorkloadId: undefined, @@ -2753,6 +3735,9 @@ export const deserializeAws_restJson1ListLensReviewImprovementsCommand = async ( if (data.LensAlias !== undefined && data.LensAlias !== null) { contents.LensAlias = __expectString(data.LensAlias); } + if (data.LensArn !== undefined && data.LensArn !== null) { + contents.LensArn = __expectString(data.LensArn); + } if (data.MilestoneNumber !== undefined && data.MilestoneNumber !== null) { contents.MilestoneNumber = __expectInt32(data.MilestoneNumber); } @@ -2933,6 +3918,97 @@ const deserializeAws_restJson1ListLensReviewsCommandError = async ( return Promise.reject(Object.assign(new Error(message), response)); }; +export const deserializeAws_restJson1ListLensSharesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListLensSharesCommandError(output, context); + } + const contents: ListLensSharesCommandOutput = { + $metadata: deserializeMetadata(output), + LensShareSummaries: undefined, + NextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.LensShareSummaries !== undefined && data.LensShareSummaries !== null) { + contents.LensShareSummaries = deserializeAws_restJson1LensShareSummaries(data.LensShareSummaries, context); + } + if (data.NextToken !== undefined && data.NextToken !== null) { + contents.NextToken = __expectString(data.NextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListLensSharesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let 
errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.wellarchitected#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.wellarchitected#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.wellarchitected#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.wellarchitected#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.wellarchitected#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + export const deserializeAws_restJson1ListMilestonesCommand = async ( output: __HttpResponse, context: __SerdeContext @@ -3570,6 +4646,7 @@ export const deserializeAws_restJson1UpdateAnswerCommand = async ( $metadata: deserializeMetadata(output), Answer: undefined, LensAlias: undefined, + LensArn: undefined, WorkloadId: undefined, }; const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); @@ -3579,6 +4656,9 @@ export const deserializeAws_restJson1UpdateAnswerCommand = async ( if (data.LensAlias !== undefined && data.LensAlias !== null) { contents.LensAlias = __expectString(data.LensAlias); } + if (data.LensArn !== undefined && data.LensArn !== null) { + contents.LensArn = __expectString(data.LensArn); + } if (data.WorkloadId !== undefined && data.WorkloadId !== null) { contents.WorkloadId = __expectString(data.WorkloadId); } @@ -4439,6 +5519,7 @@ const deserializeAws_restJson1Answer = (output: any, context: __SerdeContext): A output.Choices !== undefined && output.Choices !== null ? 
deserializeAws_restJson1Choices(output.Choices, context) : undefined, + HelpfulResourceDisplayText: __expectString(output.HelpfulResourceDisplayText), HelpfulResourceUrl: __expectString(output.HelpfulResourceUrl), ImprovementPlanUrl: __expectString(output.ImprovementPlanUrl), IsApplicable: __expectBoolean(output.IsApplicable), @@ -4494,6 +5575,14 @@ const deserializeAws_restJson1Choice = (output: any, context: __SerdeContext): C return { ChoiceId: __expectString(output.ChoiceId), Description: __expectString(output.Description), + HelpfulResource: + output.HelpfulResource !== undefined && output.HelpfulResource !== null + ? deserializeAws_restJson1ChoiceContent(output.HelpfulResource, context) + : undefined, + ImprovementPlan: + output.ImprovementPlan !== undefined && output.ImprovementPlan !== null + ? deserializeAws_restJson1ChoiceContent(output.ImprovementPlan, context) + : undefined, Title: __expectString(output.Title), } as any; }; @@ -4537,6 +5626,35 @@ const deserializeAws_restJson1ChoiceAnswerSummary = (output: any, context: __Ser } as any; }; +const deserializeAws_restJson1ChoiceContent = (output: any, context: __SerdeContext): ChoiceContent => { + return { + DisplayText: __expectString(output.DisplayText), + Url: __expectString(output.Url), + } as any; +}; + +const deserializeAws_restJson1ChoiceImprovementPlan = (output: any, context: __SerdeContext): ChoiceImprovementPlan => { + return { + ChoiceId: __expectString(output.ChoiceId), + DisplayText: __expectString(output.DisplayText), + ImprovementPlanUrl: __expectString(output.ImprovementPlanUrl), + } as any; +}; + +const deserializeAws_restJson1ChoiceImprovementPlans = ( + output: any, + context: __SerdeContext +): ChoiceImprovementPlan[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ChoiceImprovementPlan(entry, context); + }); +}; + const deserializeAws_restJson1Choices = (output: any, context: __SerdeContext): Choice[] => { return (output || []) .filter((e: any) => e != null) @@ -4562,6 +5680,10 @@ const deserializeAws_restJson1ImprovementSummaries = (output: any, context: __Se const deserializeAws_restJson1ImprovementSummary = (output: any, context: __SerdeContext): ImprovementSummary => { return { ImprovementPlanUrl: __expectString(output.ImprovementPlanUrl), + ImprovementPlans: + output.ImprovementPlans !== undefined && output.ImprovementPlans !== null + ? 
deserializeAws_restJson1ChoiceImprovementPlans(output.ImprovementPlans, context) + : undefined, PillarId: __expectString(output.PillarId), QuestionId: __expectString(output.QuestionId), QuestionTitle: __expectString(output.QuestionTitle), @@ -4569,9 +5691,21 @@ const deserializeAws_restJson1ImprovementSummary = (output: any, context: __Serd } as any; }; +const deserializeAws_restJson1Lens = (output: any, context: __SerdeContext): Lens => { + return { + Description: __expectString(output.Description), + LensArn: __expectString(output.LensArn), + LensVersion: __expectString(output.LensVersion), + Name: __expectString(output.Name), + Owner: __expectString(output.Owner), + ShareInvitationId: __expectString(output.ShareInvitationId), + } as any; +}; + const deserializeAws_restJson1LensReview = (output: any, context: __SerdeContext): LensReview => { return { LensAlias: __expectString(output.LensAlias), + LensArn: __expectString(output.LensArn), LensName: __expectString(output.LensName), LensStatus: __expectString(output.LensStatus), LensVersion: __expectString(output.LensVersion), @@ -4596,6 +5730,7 @@ const deserializeAws_restJson1LensReviewReport = (output: any, context: __SerdeC return { Base64String: __expectString(output.Base64String), LensAlias: __expectString(output.LensAlias), + LensArn: __expectString(output.LensArn), } as any; }; @@ -4613,6 +5748,7 @@ const deserializeAws_restJson1LensReviewSummaries = (output: any, context: __Ser const deserializeAws_restJson1LensReviewSummary = (output: any, context: __SerdeContext): LensReviewSummary => { return { LensAlias: __expectString(output.LensAlias), + LensArn: __expectString(output.LensArn), LensName: __expectString(output.LensName), LensStatus: __expectString(output.LensStatus), LensVersion: __expectString(output.LensVersion), @@ -4627,6 +5763,25 @@ const deserializeAws_restJson1LensReviewSummary = (output: any, context: __Serde } as any; }; +const deserializeAws_restJson1LensShareSummaries = (output: any, context: __SerdeContext): LensShareSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1LensShareSummary(entry, context); + }); +}; + +const deserializeAws_restJson1LensShareSummary = (output: any, context: __SerdeContext): LensShareSummary => { + return { + ShareId: __expectString(output.ShareId), + SharedWith: __expectString(output.SharedWith), + Status: __expectString(output.Status), + } as any; +}; + const deserializeAws_restJson1LensSummaries = (output: any, context: __SerdeContext): LensSummary[] => { return (output || []) .filter((e: any) => e != null) @@ -4640,10 +5795,22 @@ const deserializeAws_restJson1LensSummaries = (output: any, context: __SerdeCont const deserializeAws_restJson1LensSummary = (output: any, context: __SerdeContext): LensSummary => { return { + CreatedAt: + output.CreatedAt !== undefined && output.CreatedAt !== null + ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.CreatedAt))) + : undefined, Description: __expectString(output.Description), LensAlias: __expectString(output.LensAlias), + LensArn: __expectString(output.LensArn), LensName: __expectString(output.LensName), + LensStatus: __expectString(output.LensStatus), + LensType: __expectString(output.LensType), LensVersion: __expectString(output.LensVersion), + Owner: __expectString(output.Owner), + UpdatedAt: + output.UpdatedAt !== undefined && output.UpdatedAt !== null + ? 
__expectNonNull(__parseEpochTimestamp(__expectNumber(output.UpdatedAt))) + : undefined, } as any; }; @@ -4652,6 +5819,7 @@ const deserializeAws_restJson1LensUpgradeSummary = (output: any, context: __Serd CurrentLensVersion: __expectString(output.CurrentLensVersion), LatestLensVersion: __expectString(output.LatestLensVersion), LensAlias: __expectString(output.LensAlias), + LensArn: __expectString(output.LensArn), WorkloadId: __expectString(output.WorkloadId), WorkloadName: __expectString(output.WorkloadName), } as any; @@ -4723,6 +5891,7 @@ const deserializeAws_restJson1PillarDifference = (output: any, context: __SerdeC return { DifferenceStatus: __expectString(output.DifferenceStatus), PillarId: __expectString(output.PillarId), + PillarName: __expectString(output.PillarName), QuestionDifferences: output.QuestionDifferences !== undefined && output.QuestionDifferences !== null ? deserializeAws_restJson1QuestionDifferences(output.QuestionDifferences, context) @@ -4808,7 +5977,10 @@ const deserializeAws_restJson1SelectedChoices = (output: any, context: __SerdeCo const deserializeAws_restJson1ShareInvitation = (output: any, context: __SerdeContext): ShareInvitation => { return { + LensAlias: __expectString(output.LensAlias), + LensArn: __expectString(output.LensArn), ShareInvitationId: __expectString(output.ShareInvitationId), + ShareResourceType: __expectString(output.ShareResourceType), WorkloadId: __expectString(output.WorkloadId), } as any; }; @@ -4832,8 +6004,11 @@ const deserializeAws_restJson1ShareInvitationSummary = ( context: __SerdeContext ): ShareInvitationSummary => { return { + LensArn: __expectString(output.LensArn), + LensName: __expectString(output.LensName), PermissionType: __expectString(output.PermissionType), ShareInvitationId: __expectString(output.ShareInvitationId), + ShareResourceType: __expectString(output.ShareResourceType), SharedBy: __expectString(output.SharedBy), SharedWith: __expectString(output.SharedWith), WorkloadId: __expectString(output.WorkloadId), diff --git a/clients/client-workspaces-web/.gitignore b/clients/client-workspaces-web/.gitignore new file mode 100644 index 000000000000..54f14c9aef25 --- /dev/null +++ b/clients/client-workspaces-web/.gitignore @@ -0,0 +1,9 @@ +/node_modules/ +/build/ +/coverage/ +/docs/ +/dist-* +*.tsbuildinfo +*.tgz +*.log +package-lock.json diff --git a/clients/client-workspaces-web/LICENSE b/clients/client-workspaces-web/LICENSE new file mode 100644 index 000000000000..f9e0c8672bca --- /dev/null +++ b/clients/client-workspaces-web/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/clients/client-workspaces-web/README.md b/clients/client-workspaces-web/README.md new file mode 100644 index 000000000000..07d06153e9a9 --- /dev/null +++ b/clients/client-workspaces-web/README.md @@ -0,0 +1,208 @@ +# @aws-sdk/client-workspaces-web + +[![NPM version](https://img.shields.io/npm/v/@aws-sdk/client-workspaces-web/latest.svg)](https://www.npmjs.com/package/@aws-sdk/client-workspaces-web) +[![NPM downloads](https://img.shields.io/npm/dm/@aws-sdk/client-workspaces-web.svg)](https://www.npmjs.com/package/@aws-sdk/client-workspaces-web) + +## Description + +AWS SDK for JavaScript WorkSpacesWeb Client for Node.js, Browser and React Native. + +

WorkSpaces Web is a low-cost, fully managed WorkSpace built specifically to facilitate +secure, web-based workloads. WorkSpaces Web makes it easy for customers to safely provide +their employees with access to internal websites and SaaS web applications without the +administrative burden of appliances or specialized client software. WorkSpaces Web provides +simple policy tools tailored for user interactions, while offloading common tasks like +capacity management, scaling, and maintaining browser images.
+ +## Installing + +To install this package, simply type add or install @aws-sdk/client-workspaces-web +using your favorite package manager: + +- `npm install @aws-sdk/client-workspaces-web` +- `yarn add @aws-sdk/client-workspaces-web` +- `pnpm add @aws-sdk/client-workspaces-web` + +## Getting Started + +### Import + +The AWS SDK is modularized by clients and commands. +To send a request, you only need to import the `WorkSpacesWebClient` and +the commands you need, for example `AssociateBrowserSettingsCommand`: + +```js +// ES5 example +const { WorkSpacesWebClient, AssociateBrowserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); +``` + +```ts +// ES6+ example +import { WorkSpacesWebClient, AssociateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web"; +``` + +### Usage + +To send a request, you: + +- Initialize the client with configuration (e.g. credentials, region). +- Initialize the command with input parameters. +- Call the `send` operation on the client with the command object as input. +- If you are using a custom http handler, you may call `destroy()` to close open connections. + +```js +// a client can be shared by different commands. +const client = new WorkSpacesWebClient({ region: "REGION" }); + +const params = { + /** input parameters */ +}; +const command = new AssociateBrowserSettingsCommand(params); +``` + +#### Async/await + +We recommend using the [await](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/await) +operator to wait for the promise returned by the send operation as follows: + +```js +// async/await. +try { + const data = await client.send(command); + // process data. +} catch (error) { + // error handling. +} finally { + // finally. +} +``` + +Async/await is clean, concise, intuitive, easy to debug, and has better error handling +compared to using Promise chains or callbacks. + +#### Promises + +You can also use [Promise chaining](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Using_promises#chaining) +to execute the send operation. + +```js +client.send(command).then( + (data) => { + // process data. + }, + (error) => { + // error handling. + } +); +``` + +Promises can also be called using `.catch()` and `.finally()` as follows: + +```js +client + .send(command) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }) + .finally(() => { + // finally. + }); +``` + +#### Callbacks + +We do not recommend using callbacks because of [callback hell](http://callbackhell.com/), +but they are supported by the send operation. + +```js +// callbacks. +client.send(command, (err, data) => { + // process err and data. +}); +``` + +#### v2 compatible style + +The client can also send requests using the v2-compatible style. +However, it results in a bigger bundle size and may be dropped in the next major version. More details are available in the blog post +on [modular packages in AWS SDK for JavaScript](https://aws.amazon.com/blogs/developer/modular-packages-in-aws-sdk-for-javascript/). + +```ts +import * as AWS from "@aws-sdk/client-workspaces-web"; +const client = new AWS.WorkSpacesWeb({ region: "REGION" }); + +// async/await. +try { + const data = await client.associateBrowserSettings(params); + // process data. +} catch (error) { + // error handling. +} + +// Promises. +client + .associateBrowserSettings(params) + .then((data) => { + // process data. + }) + .catch((error) => { + // error handling. + }); + +// callbacks.
+client.associateBrowserSettings(params, (err, data) => { + // process err and data. +}); +``` + +### Troubleshooting + +When the service returns an exception, the error will include the exception information, +as well as response metadata (e.g. request id). + +```js +try { + const data = await client.send(command); + // process data. +} catch (error) { + const { requestId, cfId, extendedRequestId } = error.$metadata; + console.log({ requestId, cfId, extendedRequestId }); + /** + * The keys within exceptions are also parsed. + * You can access them by specifying exception names: + * if (error.name === 'SomeServiceException') { + * const value = error.specialKeyInException; + * } + */ +} +``` + +## Getting Help + +Please use these community resources for getting help. +We use GitHub issues for tracking bugs and feature requests, but have limited bandwidth to address them. + +- Visit the [Developer Guide](https://docs.aws.amazon.com/sdk-for-javascript/v3/developer-guide/welcome.html) + or [API Reference](https://docs.aws.amazon.com/AWSJavaScriptSDK/v3/latest/index.html). +- Check out the blog posts tagged with [`aws-sdk-js`](https://aws.amazon.com/blogs/developer/tag/aws-sdk-js/) + on the AWS Developer Blog. +- Ask a question on [StackOverflow](https://stackoverflow.com/questions/tagged/aws-sdk-js) and tag it with `aws-sdk-js`. +- Join the AWS JavaScript community on [gitter](https://gitter.im/aws/aws-sdk-js-v3). +- If it turns out that you may have found a bug, please [open an issue](https://github.com/aws/aws-sdk-js-v3/issues/new/choose). + +To test your universal JavaScript code in Node.js, browser and react-native environments, +visit our [code samples repo](https://github.com/aws-samples/aws-sdk-js-tests). + +## Contributing + +This client code is generated automatically. Any modifications will be overwritten the next time the `@aws-sdk/client-workspaces-web` package is updated. +To contribute to this client, you can check our [generate clients scripts](https://github.com/aws/aws-sdk-js-v3/tree/main/scripts/generate-clients). + +## License + +This SDK is distributed under the +[Apache License, Version 2.0](http://www.apache.org/licenses/LICENSE-2.0); +see LICENSE for more information.
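As a cross-reference for the README above, the following is a minimal TypeScript sketch of the request/response flow it describes: one shared `WorkSpacesWebClient`, one command object per call, and error handling through `error.name` and `error.$metadata`. The client and command names come from this diff; the input field names (`browserSettingsArn`, `portalArn`) are assumptions for illustration and should be verified against the generated models for `@aws-sdk/client-workspaces-web`.

```ts
// Minimal sketch, not generated code: it only restates the flow described in the README.
// The input field names below are assumed; check the client's models_0.ts for the exact shape.
import { WorkSpacesWebClient, AssociateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

async function associateBrowserSettings(): Promise<void> {
  try {
    const data = await client.send(
      new AssociateBrowserSettingsCommand({
        // Assumed inputs: ARNs of an existing browser settings resource and web portal.
        browserSettingsArn: "arn:aws:workspaces-web:us-west-2:111122223333:browserSettings/example",
        portalArn: "arn:aws:workspaces-web:us-west-2:111122223333:portal/example",
      })
    );
    // Response metadata is always present on command outputs.
    console.log("HTTP status:", data.$metadata.httpStatusCode);
  } catch (error: any) {
    // Exception names are parsed into error.name; metadata carries the request id.
    console.error(error.name, error.$metadata?.requestId);
  }
}

associateBrowserSettings();
```

The same pattern applies to the other commands added in this diff (for example `CreateTrustStoreCommand` or `CreatePortalCommand`); only the input fields differ.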
diff --git a/clients/client-workspaces-web/jest.config.js b/clients/client-workspaces-web/jest.config.js new file mode 100644 index 000000000000..02eed352c6a8 --- /dev/null +++ b/clients/client-workspaces-web/jest.config.js @@ -0,0 +1,4 @@ +module.exports = { + preset: "ts-jest", + testMatch: ["**/*.spec.ts", "!**/*.browser.spec.ts", "!**/*.integ.spec.ts"], +}; diff --git a/clients/client-workspaces-web/package.json b/clients/client-workspaces-web/package.json new file mode 100644 index 000000000000..549d28c109c2 --- /dev/null +++ b/clients/client-workspaces-web/package.json @@ -0,0 +1,96 @@ +{ + "name": "@aws-sdk/client-workspaces-web", + "description": "AWS SDK for JavaScript Workspaces Web Client for Node.js, Browser and React Native", + "version": "3.0.0", + "scripts": { + "build": "yarn build:cjs && yarn build:es && yarn build:types", + "build:cjs": "tsc -p tsconfig.json", + "build:docs": "yarn clean:docs && typedoc ./", + "build:es": "tsc -p tsconfig.es.json", + "build:types": "tsc -p tsconfig.types.json", + "clean": "yarn clean:dist && yarn clean:docs", + "clean:dist": "rimraf ./dist-*", + "clean:docs": "rimraf ./docs", + "downlevel-dts": "downlevel-dts dist-types dist-types/ts3.4", + "test": "jest --coverage --passWithNoTests" + }, + "main": "./dist-cjs/index.js", + "types": "./dist-types/index.d.ts", + "module": "./dist-es/index.js", + "sideEffects": false, + "dependencies": { + "@aws-crypto/sha256-browser": "2.0.0", + "@aws-crypto/sha256-js": "2.0.0", + "@aws-sdk/client-sts": "3.43.0", + "@aws-sdk/config-resolver": "3.40.0", + "@aws-sdk/credential-provider-node": "3.41.0", + "@aws-sdk/fetch-http-handler": "3.40.0", + "@aws-sdk/hash-node": "3.40.0", + "@aws-sdk/invalid-dependency": "3.40.0", + "@aws-sdk/middleware-content-length": "3.40.0", + "@aws-sdk/middleware-host-header": "3.40.0", + "@aws-sdk/middleware-logger": "3.40.0", + "@aws-sdk/middleware-retry": "3.40.0", + "@aws-sdk/middleware-serde": "3.40.0", + "@aws-sdk/middleware-signing": "3.40.0", + "@aws-sdk/middleware-stack": "3.40.0", + "@aws-sdk/middleware-user-agent": "3.40.0", + "@aws-sdk/node-config-provider": "3.40.0", + "@aws-sdk/node-http-handler": "3.40.0", + "@aws-sdk/protocol-http": "3.40.0", + "@aws-sdk/smithy-client": "3.41.0", + "@aws-sdk/types": "3.40.0", + "@aws-sdk/url-parser": "3.40.0", + "@aws-sdk/util-base64-browser": "3.37.0", + "@aws-sdk/util-base64-node": "3.37.0", + "@aws-sdk/util-body-length-browser": "3.37.0", + "@aws-sdk/util-body-length-node": "3.37.0", + "@aws-sdk/util-user-agent-browser": "3.40.0", + "@aws-sdk/util-user-agent-node": "3.40.0", + "@aws-sdk/util-utf8-browser": "3.37.0", + "@aws-sdk/util-utf8-node": "3.37.0", + "tslib": "^2.3.0", + "uuid": "^8.3.2" + }, + "devDependencies": { + "@aws-sdk/service-client-documentation-generator": "3.38.0", + "@types/node": "^12.7.5", + "@types/uuid": "^8.3.0", + "downlevel-dts": "0.7.0", + "jest": "^26.1.0", + "rimraf": "^3.0.0", + "ts-jest": "^26.4.1", + "typedoc": "^0.19.2", + "typescript": "~4.3.5" + }, + "engines": { + "node": ">=10.0.0" + }, + "typesVersions": { + "<4.0": { + "dist-types/*": [ + "dist-types/ts3.4/*" + ] + } + }, + "files": [ + "dist-*" + ], + "author": { + "name": "AWS SDK for JavaScript Team", + "url": "https://aws.amazon.com/javascript/" + }, + "license": "Apache-2.0", + "browser": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.browser" + }, + "react-native": { + "./dist-es/runtimeConfig": "./dist-es/runtimeConfig.native" + }, + "homepage": "https://github.com/aws/aws-sdk-js-v3/tree/main/clients/client-workspaces-web", 
+ "repository": { + "type": "git", + "url": "https://github.com/aws/aws-sdk-js-v3.git", + "directory": "clients/client-workspaces-web" + } +} diff --git a/clients/client-workspaces-web/src/WorkSpacesWeb.ts b/clients/client-workspaces-web/src/WorkSpacesWeb.ts new file mode 100644 index 000000000000..fbcc14e62694 --- /dev/null +++ b/clients/client-workspaces-web/src/WorkSpacesWeb.ts @@ -0,0 +1,1612 @@ +import { HttpHandlerOptions as __HttpHandlerOptions } from "@aws-sdk/types"; + +import { + AssociateBrowserSettingsCommand, + AssociateBrowserSettingsCommandInput, + AssociateBrowserSettingsCommandOutput, +} from "./commands/AssociateBrowserSettingsCommand"; +import { + AssociateNetworkSettingsCommand, + AssociateNetworkSettingsCommandInput, + AssociateNetworkSettingsCommandOutput, +} from "./commands/AssociateNetworkSettingsCommand"; +import { + AssociateTrustStoreCommand, + AssociateTrustStoreCommandInput, + AssociateTrustStoreCommandOutput, +} from "./commands/AssociateTrustStoreCommand"; +import { + AssociateUserSettingsCommand, + AssociateUserSettingsCommandInput, + AssociateUserSettingsCommandOutput, +} from "./commands/AssociateUserSettingsCommand"; +import { + CreateBrowserSettingsCommand, + CreateBrowserSettingsCommandInput, + CreateBrowserSettingsCommandOutput, +} from "./commands/CreateBrowserSettingsCommand"; +import { + CreateIdentityProviderCommand, + CreateIdentityProviderCommandInput, + CreateIdentityProviderCommandOutput, +} from "./commands/CreateIdentityProviderCommand"; +import { + CreateNetworkSettingsCommand, + CreateNetworkSettingsCommandInput, + CreateNetworkSettingsCommandOutput, +} from "./commands/CreateNetworkSettingsCommand"; +import { + CreatePortalCommand, + CreatePortalCommandInput, + CreatePortalCommandOutput, +} from "./commands/CreatePortalCommand"; +import { + CreateTrustStoreCommand, + CreateTrustStoreCommandInput, + CreateTrustStoreCommandOutput, +} from "./commands/CreateTrustStoreCommand"; +import { + CreateUserSettingsCommand, + CreateUserSettingsCommandInput, + CreateUserSettingsCommandOutput, +} from "./commands/CreateUserSettingsCommand"; +import { + DeleteBrowserSettingsCommand, + DeleteBrowserSettingsCommandInput, + DeleteBrowserSettingsCommandOutput, +} from "./commands/DeleteBrowserSettingsCommand"; +import { + DeleteIdentityProviderCommand, + DeleteIdentityProviderCommandInput, + DeleteIdentityProviderCommandOutput, +} from "./commands/DeleteIdentityProviderCommand"; +import { + DeleteNetworkSettingsCommand, + DeleteNetworkSettingsCommandInput, + DeleteNetworkSettingsCommandOutput, +} from "./commands/DeleteNetworkSettingsCommand"; +import { + DeletePortalCommand, + DeletePortalCommandInput, + DeletePortalCommandOutput, +} from "./commands/DeletePortalCommand"; +import { + DeleteTrustStoreCommand, + DeleteTrustStoreCommandInput, + DeleteTrustStoreCommandOutput, +} from "./commands/DeleteTrustStoreCommand"; +import { + DeleteUserSettingsCommand, + DeleteUserSettingsCommandInput, + DeleteUserSettingsCommandOutput, +} from "./commands/DeleteUserSettingsCommand"; +import { + DisassociateBrowserSettingsCommand, + DisassociateBrowserSettingsCommandInput, + DisassociateBrowserSettingsCommandOutput, +} from "./commands/DisassociateBrowserSettingsCommand"; +import { + DisassociateNetworkSettingsCommand, + DisassociateNetworkSettingsCommandInput, + DisassociateNetworkSettingsCommandOutput, +} from "./commands/DisassociateNetworkSettingsCommand"; +import { + DisassociateTrustStoreCommand, + DisassociateTrustStoreCommandInput, + 
DisassociateTrustStoreCommandOutput, +} from "./commands/DisassociateTrustStoreCommand"; +import { + DisassociateUserSettingsCommand, + DisassociateUserSettingsCommandInput, + DisassociateUserSettingsCommandOutput, +} from "./commands/DisassociateUserSettingsCommand"; +import { + GetBrowserSettingsCommand, + GetBrowserSettingsCommandInput, + GetBrowserSettingsCommandOutput, +} from "./commands/GetBrowserSettingsCommand"; +import { + GetIdentityProviderCommand, + GetIdentityProviderCommandInput, + GetIdentityProviderCommandOutput, +} from "./commands/GetIdentityProviderCommand"; +import { + GetNetworkSettingsCommand, + GetNetworkSettingsCommandInput, + GetNetworkSettingsCommandOutput, +} from "./commands/GetNetworkSettingsCommand"; +import { GetPortalCommand, GetPortalCommandInput, GetPortalCommandOutput } from "./commands/GetPortalCommand"; +import { + GetPortalServiceProviderMetadataCommand, + GetPortalServiceProviderMetadataCommandInput, + GetPortalServiceProviderMetadataCommandOutput, +} from "./commands/GetPortalServiceProviderMetadataCommand"; +import { + GetTrustStoreCertificateCommand, + GetTrustStoreCertificateCommandInput, + GetTrustStoreCertificateCommandOutput, +} from "./commands/GetTrustStoreCertificateCommand"; +import { + GetTrustStoreCommand, + GetTrustStoreCommandInput, + GetTrustStoreCommandOutput, +} from "./commands/GetTrustStoreCommand"; +import { + GetUserSettingsCommand, + GetUserSettingsCommandInput, + GetUserSettingsCommandOutput, +} from "./commands/GetUserSettingsCommand"; +import { + ListBrowserSettingsCommand, + ListBrowserSettingsCommandInput, + ListBrowserSettingsCommandOutput, +} from "./commands/ListBrowserSettingsCommand"; +import { + ListIdentityProvidersCommand, + ListIdentityProvidersCommandInput, + ListIdentityProvidersCommandOutput, +} from "./commands/ListIdentityProvidersCommand"; +import { + ListNetworkSettingsCommand, + ListNetworkSettingsCommandInput, + ListNetworkSettingsCommandOutput, +} from "./commands/ListNetworkSettingsCommand"; +import { ListPortalsCommand, ListPortalsCommandInput, ListPortalsCommandOutput } from "./commands/ListPortalsCommand"; +import { + ListTagsForResourceCommand, + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + ListTrustStoreCertificatesCommand, + ListTrustStoreCertificatesCommandInput, + ListTrustStoreCertificatesCommandOutput, +} from "./commands/ListTrustStoreCertificatesCommand"; +import { + ListTrustStoresCommand, + ListTrustStoresCommandInput, + ListTrustStoresCommandOutput, +} from "./commands/ListTrustStoresCommand"; +import { + ListUserSettingsCommand, + ListUserSettingsCommandInput, + ListUserSettingsCommandOutput, +} from "./commands/ListUserSettingsCommand"; +import { TagResourceCommand, TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { + UntagResourceCommand, + UntagResourceCommandInput, + UntagResourceCommandOutput, +} from "./commands/UntagResourceCommand"; +import { + UpdateBrowserSettingsCommand, + UpdateBrowserSettingsCommandInput, + UpdateBrowserSettingsCommandOutput, +} from "./commands/UpdateBrowserSettingsCommand"; +import { + UpdateIdentityProviderCommand, + UpdateIdentityProviderCommandInput, + UpdateIdentityProviderCommandOutput, +} from "./commands/UpdateIdentityProviderCommand"; +import { + UpdateNetworkSettingsCommand, + UpdateNetworkSettingsCommandInput, + UpdateNetworkSettingsCommandOutput, +} from "./commands/UpdateNetworkSettingsCommand"; +import { + 
UpdatePortalCommand, + UpdatePortalCommandInput, + UpdatePortalCommandOutput, +} from "./commands/UpdatePortalCommand"; +import { + UpdateTrustStoreCommand, + UpdateTrustStoreCommandInput, + UpdateTrustStoreCommandOutput, +} from "./commands/UpdateTrustStoreCommand"; +import { + UpdateUserSettingsCommand, + UpdateUserSettingsCommandInput, + UpdateUserSettingsCommandOutput, +} from "./commands/UpdateUserSettingsCommand"; +import { WorkSpacesWebClient } from "./WorkSpacesWebClient"; + +/** + *

WorkSpaces Web is a low-cost, fully managed WorkSpace built specifically to facilitate + * secure, web-based workloads. WorkSpaces Web makes it easy for customers to safely provide + * their employees with access to internal websites and SaaS web applications without the + * administrative burden of appliances or specialized client software. WorkSpaces Web provides + * simple policy tools tailored for user interactions, while offloading common tasks like + * capacity management, scaling, and maintaining browser images.
                                                                      + */ +export class WorkSpacesWeb extends WorkSpacesWebClient { + /** + *

Associates a browser settings resource with a web portal.
                                                                      + */ + public associateBrowserSettings( + args: AssociateBrowserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public associateBrowserSettings( + args: AssociateBrowserSettingsCommandInput, + cb: (err: any, data?: AssociateBrowserSettingsCommandOutput) => void + ): void; + public associateBrowserSettings( + args: AssociateBrowserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AssociateBrowserSettingsCommandOutput) => void + ): void; + public associateBrowserSettings( + args: AssociateBrowserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: AssociateBrowserSettingsCommandOutput) => void), + cb?: (err: any, data?: AssociateBrowserSettingsCommandOutput) => void + ): Promise | void { + const command = new AssociateBrowserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Associates a network settings resource with a web portal.
                                                                      + */ + public associateNetworkSettings( + args: AssociateNetworkSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public associateNetworkSettings( + args: AssociateNetworkSettingsCommandInput, + cb: (err: any, data?: AssociateNetworkSettingsCommandOutput) => void + ): void; + public associateNetworkSettings( + args: AssociateNetworkSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AssociateNetworkSettingsCommandOutput) => void + ): void; + public associateNetworkSettings( + args: AssociateNetworkSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: AssociateNetworkSettingsCommandOutput) => void), + cb?: (err: any, data?: AssociateNetworkSettingsCommandOutput) => void + ): Promise | void { + const command = new AssociateNetworkSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Associates a trust store with a web portal.
                                                                      + */ + public associateTrustStore( + args: AssociateTrustStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public associateTrustStore( + args: AssociateTrustStoreCommandInput, + cb: (err: any, data?: AssociateTrustStoreCommandOutput) => void + ): void; + public associateTrustStore( + args: AssociateTrustStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AssociateTrustStoreCommandOutput) => void + ): void; + public associateTrustStore( + args: AssociateTrustStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: AssociateTrustStoreCommandOutput) => void), + cb?: (err: any, data?: AssociateTrustStoreCommandOutput) => void + ): Promise | void { + const command = new AssociateTrustStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Associates a user settings resource with a web portal.
                                                                      + */ + public associateUserSettings( + args: AssociateUserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public associateUserSettings( + args: AssociateUserSettingsCommandInput, + cb: (err: any, data?: AssociateUserSettingsCommandOutput) => void + ): void; + public associateUserSettings( + args: AssociateUserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: AssociateUserSettingsCommandOutput) => void + ): void; + public associateUserSettings( + args: AssociateUserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: AssociateUserSettingsCommandOutput) => void), + cb?: (err: any, data?: AssociateUserSettingsCommandOutput) => void + ): Promise | void { + const command = new AssociateUserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a browser settings resource that can be associated with a web portal. Once + * associated with a web portal, browser settings control how the browser will behave once a + * user starts a streaming session for the web portal.
                                                                      + */ + public createBrowserSettings( + args: CreateBrowserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createBrowserSettings( + args: CreateBrowserSettingsCommandInput, + cb: (err: any, data?: CreateBrowserSettingsCommandOutput) => void + ): void; + public createBrowserSettings( + args: CreateBrowserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateBrowserSettingsCommandOutput) => void + ): void; + public createBrowserSettings( + args: CreateBrowserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateBrowserSettingsCommandOutput) => void), + cb?: (err: any, data?: CreateBrowserSettingsCommandOutput) => void + ): Promise | void { + const command = new CreateBrowserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates an identity provider resource that is then associated with a web portal.
                                                                      + */ + public createIdentityProvider( + args: CreateIdentityProviderCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createIdentityProvider( + args: CreateIdentityProviderCommandInput, + cb: (err: any, data?: CreateIdentityProviderCommandOutput) => void + ): void; + public createIdentityProvider( + args: CreateIdentityProviderCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateIdentityProviderCommandOutput) => void + ): void; + public createIdentityProvider( + args: CreateIdentityProviderCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateIdentityProviderCommandOutput) => void), + cb?: (err: any, data?: CreateIdentityProviderCommandOutput) => void + ): Promise | void { + const command = new CreateIdentityProviderCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a network settings resource that can be associated with a web portal. Once + * associated with a web portal, network settings define how streaming instances will connect + * with your specified VPC.
                                                                      + */ + public createNetworkSettings( + args: CreateNetworkSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createNetworkSettings( + args: CreateNetworkSettingsCommandInput, + cb: (err: any, data?: CreateNetworkSettingsCommandOutput) => void + ): void; + public createNetworkSettings( + args: CreateNetworkSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateNetworkSettingsCommandOutput) => void + ): void; + public createNetworkSettings( + args: CreateNetworkSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateNetworkSettingsCommandOutput) => void), + cb?: (err: any, data?: CreateNetworkSettingsCommandOutput) => void + ): Promise | void { + const command = new CreateNetworkSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a web portal.
                                                                      + */ + public createPortal( + args: CreatePortalCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createPortal(args: CreatePortalCommandInput, cb: (err: any, data?: CreatePortalCommandOutput) => void): void; + public createPortal( + args: CreatePortalCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreatePortalCommandOutput) => void + ): void; + public createPortal( + args: CreatePortalCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreatePortalCommandOutput) => void), + cb?: (err: any, data?: CreatePortalCommandOutput) => void + ): Promise | void { + const command = new CreatePortalCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a trust store that can be associated with a web portal. A trust store contains + * certificate authority (CA) certificates. Once associated with a web portal, the browser in + * a streaming session will recognize certificates that have been issued using any of the CAs + * in the trust store. If your organization has internal websites that use certificates issued + * by private CAs, you should add the private CA certificate to the trust store.
                                                                      + */ + public createTrustStore( + args: CreateTrustStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createTrustStore( + args: CreateTrustStoreCommandInput, + cb: (err: any, data?: CreateTrustStoreCommandOutput) => void + ): void; + public createTrustStore( + args: CreateTrustStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateTrustStoreCommandOutput) => void + ): void; + public createTrustStore( + args: CreateTrustStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateTrustStoreCommandOutput) => void), + cb?: (err: any, data?: CreateTrustStoreCommandOutput) => void + ): Promise | void { + const command = new CreateTrustStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Creates a user settings resource that can be associated with a web portal. Once + * associated with a web portal, user settings control how users can transfer data between a + * streaming session and their local devices.
                                                                      + */ + public createUserSettings( + args: CreateUserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public createUserSettings( + args: CreateUserSettingsCommandInput, + cb: (err: any, data?: CreateUserSettingsCommandOutput) => void + ): void; + public createUserSettings( + args: CreateUserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: CreateUserSettingsCommandOutput) => void + ): void; + public createUserSettings( + args: CreateUserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: CreateUserSettingsCommandOutput) => void), + cb?: (err: any, data?: CreateUserSettingsCommandOutput) => void + ): Promise | void { + const command = new CreateUserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Deletes browser settings.
                                                                      + */ + public deleteBrowserSettings( + args: DeleteBrowserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteBrowserSettings( + args: DeleteBrowserSettingsCommandInput, + cb: (err: any, data?: DeleteBrowserSettingsCommandOutput) => void + ): void; + public deleteBrowserSettings( + args: DeleteBrowserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteBrowserSettingsCommandOutput) => void + ): void; + public deleteBrowserSettings( + args: DeleteBrowserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteBrowserSettingsCommandOutput) => void), + cb?: (err: any, data?: DeleteBrowserSettingsCommandOutput) => void + ): Promise | void { + const command = new DeleteBrowserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

Deletes the identity provider.
                                                                      + */ + public deleteIdentityProvider( + args: DeleteIdentityProviderCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteIdentityProvider( + args: DeleteIdentityProviderCommandInput, + cb: (err: any, data?: DeleteIdentityProviderCommandOutput) => void + ): void; + public deleteIdentityProvider( + args: DeleteIdentityProviderCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteIdentityProviderCommandOutput) => void + ): void; + public deleteIdentityProvider( + args: DeleteIdentityProviderCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteIdentityProviderCommandOutput) => void), + cb?: (err: any, data?: DeleteIdentityProviderCommandOutput) => void + ): Promise | void { + const command = new DeleteIdentityProviderCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Deletes network settings.</p>

                                                                      + */ + public deleteNetworkSettings( + args: DeleteNetworkSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteNetworkSettings( + args: DeleteNetworkSettingsCommandInput, + cb: (err: any, data?: DeleteNetworkSettingsCommandOutput) => void + ): void; + public deleteNetworkSettings( + args: DeleteNetworkSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteNetworkSettingsCommandOutput) => void + ): void; + public deleteNetworkSettings( + args: DeleteNetworkSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteNetworkSettingsCommandOutput) => void), + cb?: (err: any, data?: DeleteNetworkSettingsCommandOutput) => void + ): Promise | void { + const command = new DeleteNetworkSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Deletes a web portal.</p>

                                                                      + */ + public deletePortal( + args: DeletePortalCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deletePortal(args: DeletePortalCommandInput, cb: (err: any, data?: DeletePortalCommandOutput) => void): void; + public deletePortal( + args: DeletePortalCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeletePortalCommandOutput) => void + ): void; + public deletePortal( + args: DeletePortalCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeletePortalCommandOutput) => void), + cb?: (err: any, data?: DeletePortalCommandOutput) => void + ): Promise | void { + const command = new DeletePortalCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Deletes the trust store.</p>

                                                                      + */ + public deleteTrustStore( + args: DeleteTrustStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteTrustStore( + args: DeleteTrustStoreCommandInput, + cb: (err: any, data?: DeleteTrustStoreCommandOutput) => void + ): void; + public deleteTrustStore( + args: DeleteTrustStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteTrustStoreCommandOutput) => void + ): void; + public deleteTrustStore( + args: DeleteTrustStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteTrustStoreCommandOutput) => void), + cb?: (err: any, data?: DeleteTrustStoreCommandOutput) => void + ): Promise | void { + const command = new DeleteTrustStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Deletes user settings.</p>

                                                                      + */ + public deleteUserSettings( + args: DeleteUserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public deleteUserSettings( + args: DeleteUserSettingsCommandInput, + cb: (err: any, data?: DeleteUserSettingsCommandOutput) => void + ): void; + public deleteUserSettings( + args: DeleteUserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DeleteUserSettingsCommandOutput) => void + ): void; + public deleteUserSettings( + args: DeleteUserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DeleteUserSettingsCommandOutput) => void), + cb?: (err: any, data?: DeleteUserSettingsCommandOutput) => void + ): Promise | void { + const command = new DeleteUserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Disassociates browser settings from a web portal.</p>

                                                                      + */ + public disassociateBrowserSettings( + args: DisassociateBrowserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disassociateBrowserSettings( + args: DisassociateBrowserSettingsCommandInput, + cb: (err: any, data?: DisassociateBrowserSettingsCommandOutput) => void + ): void; + public disassociateBrowserSettings( + args: DisassociateBrowserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisassociateBrowserSettingsCommandOutput) => void + ): void; + public disassociateBrowserSettings( + args: DisassociateBrowserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisassociateBrowserSettingsCommandOutput) => void), + cb?: (err: any, data?: DisassociateBrowserSettingsCommandOutput) => void + ): Promise | void { + const command = new DisassociateBrowserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Disassociates network settings from a web portal.</p>

                                                                      + */ + public disassociateNetworkSettings( + args: DisassociateNetworkSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disassociateNetworkSettings( + args: DisassociateNetworkSettingsCommandInput, + cb: (err: any, data?: DisassociateNetworkSettingsCommandOutput) => void + ): void; + public disassociateNetworkSettings( + args: DisassociateNetworkSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisassociateNetworkSettingsCommandOutput) => void + ): void; + public disassociateNetworkSettings( + args: DisassociateNetworkSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisassociateNetworkSettingsCommandOutput) => void), + cb?: (err: any, data?: DisassociateNetworkSettingsCommandOutput) => void + ): Promise | void { + const command = new DisassociateNetworkSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Disassociates a trust store from a web portal.</p>

                                                                      + */ + public disassociateTrustStore( + args: DisassociateTrustStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disassociateTrustStore( + args: DisassociateTrustStoreCommandInput, + cb: (err: any, data?: DisassociateTrustStoreCommandOutput) => void + ): void; + public disassociateTrustStore( + args: DisassociateTrustStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisassociateTrustStoreCommandOutput) => void + ): void; + public disassociateTrustStore( + args: DisassociateTrustStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisassociateTrustStoreCommandOutput) => void), + cb?: (err: any, data?: DisassociateTrustStoreCommandOutput) => void + ): Promise | void { + const command = new DisassociateTrustStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Disassociates user settings from a web portal.</p>

                                                                      + */ + public disassociateUserSettings( + args: DisassociateUserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public disassociateUserSettings( + args: DisassociateUserSettingsCommandInput, + cb: (err: any, data?: DisassociateUserSettingsCommandOutput) => void + ): void; + public disassociateUserSettings( + args: DisassociateUserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: DisassociateUserSettingsCommandOutput) => void + ): void; + public disassociateUserSettings( + args: DisassociateUserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: DisassociateUserSettingsCommandOutput) => void), + cb?: (err: any, data?: DisassociateUserSettingsCommandOutput) => void + ): Promise | void { + const command = new DisassociateUserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Gets browser settings.</p>

                                                                      + */ + public getBrowserSettings( + args: GetBrowserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getBrowserSettings( + args: GetBrowserSettingsCommandInput, + cb: (err: any, data?: GetBrowserSettingsCommandOutput) => void + ): void; + public getBrowserSettings( + args: GetBrowserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetBrowserSettingsCommandOutput) => void + ): void; + public getBrowserSettings( + args: GetBrowserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetBrowserSettingsCommandOutput) => void), + cb?: (err: any, data?: GetBrowserSettingsCommandOutput) => void + ): Promise | void { + const command = new GetBrowserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Gets the identity provider.</p>

                                                                      + */ + public getIdentityProvider( + args: GetIdentityProviderCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getIdentityProvider( + args: GetIdentityProviderCommandInput, + cb: (err: any, data?: GetIdentityProviderCommandOutput) => void + ): void; + public getIdentityProvider( + args: GetIdentityProviderCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetIdentityProviderCommandOutput) => void + ): void; + public getIdentityProvider( + args: GetIdentityProviderCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetIdentityProviderCommandOutput) => void), + cb?: (err: any, data?: GetIdentityProviderCommandOutput) => void + ): Promise | void { + const command = new GetIdentityProviderCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Gets the network settings.</p>

                                                                      + */ + public getNetworkSettings( + args: GetNetworkSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getNetworkSettings( + args: GetNetworkSettingsCommandInput, + cb: (err: any, data?: GetNetworkSettingsCommandOutput) => void + ): void; + public getNetworkSettings( + args: GetNetworkSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetNetworkSettingsCommandOutput) => void + ): void; + public getNetworkSettings( + args: GetNetworkSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetNetworkSettingsCommandOutput) => void), + cb?: (err: any, data?: GetNetworkSettingsCommandOutput) => void + ): Promise | void { + const command = new GetNetworkSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Gets the web portal.</p>
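+   *          <p>A hypothetical sketch (the <code>portalArn</code> and <code>portal</code> member names are
+   *          assumptions, and the ARN is a placeholder):</p>
+   * ```javascript
+   * // Hypothetical sketch; assumes the aggregated WorkSpacesWeb client exported by this package.
+   * import { WorkSpacesWeb } from "@aws-sdk/client-workspaces-web";
+   * const client = new WorkSpacesWeb({ region: "us-east-1" });
+   * const { portal } = await client.getPortal({ portalArn: "arn:aws:workspaces-web:..." });
+   * console.log(portal);
+   * ```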

                                                                      + */ + public getPortal(args: GetPortalCommandInput, options?: __HttpHandlerOptions): Promise; + public getPortal(args: GetPortalCommandInput, cb: (err: any, data?: GetPortalCommandOutput) => void): void; + public getPortal( + args: GetPortalCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetPortalCommandOutput) => void + ): void; + public getPortal( + args: GetPortalCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetPortalCommandOutput) => void), + cb?: (err: any, data?: GetPortalCommandOutput) => void + ): Promise | void { + const command = new GetPortalCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Gets the service provider metadata.</p>

                                                                      + */ + public getPortalServiceProviderMetadata( + args: GetPortalServiceProviderMetadataCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getPortalServiceProviderMetadata( + args: GetPortalServiceProviderMetadataCommandInput, + cb: (err: any, data?: GetPortalServiceProviderMetadataCommandOutput) => void + ): void; + public getPortalServiceProviderMetadata( + args: GetPortalServiceProviderMetadataCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetPortalServiceProviderMetadataCommandOutput) => void + ): void; + public getPortalServiceProviderMetadata( + args: GetPortalServiceProviderMetadataCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetPortalServiceProviderMetadataCommandOutput) => void), + cb?: (err: any, data?: GetPortalServiceProviderMetadataCommandOutput) => void + ): Promise | void { + const command = new GetPortalServiceProviderMetadataCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Gets the trust store.</p>

                                                                      + */ + public getTrustStore( + args: GetTrustStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getTrustStore( + args: GetTrustStoreCommandInput, + cb: (err: any, data?: GetTrustStoreCommandOutput) => void + ): void; + public getTrustStore( + args: GetTrustStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetTrustStoreCommandOutput) => void + ): void; + public getTrustStore( + args: GetTrustStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetTrustStoreCommandOutput) => void), + cb?: (err: any, data?: GetTrustStoreCommandOutput) => void + ): Promise | void { + const command = new GetTrustStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Gets the trust store certificate.</p>

                                                                      + */ + public getTrustStoreCertificate( + args: GetTrustStoreCertificateCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getTrustStoreCertificate( + args: GetTrustStoreCertificateCommandInput, + cb: (err: any, data?: GetTrustStoreCertificateCommandOutput) => void + ): void; + public getTrustStoreCertificate( + args: GetTrustStoreCertificateCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetTrustStoreCertificateCommandOutput) => void + ): void; + public getTrustStoreCertificate( + args: GetTrustStoreCertificateCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetTrustStoreCertificateCommandOutput) => void), + cb?: (err: any, data?: GetTrustStoreCertificateCommandOutput) => void + ): Promise | void { + const command = new GetTrustStoreCertificateCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Gets user settings.</p>

                                                                      + */ + public getUserSettings( + args: GetUserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public getUserSettings( + args: GetUserSettingsCommandInput, + cb: (err: any, data?: GetUserSettingsCommandOutput) => void + ): void; + public getUserSettings( + args: GetUserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: GetUserSettingsCommandOutput) => void + ): void; + public getUserSettings( + args: GetUserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: GetUserSettingsCommandOutput) => void), + cb?: (err: any, data?: GetUserSettingsCommandOutput) => void + ): Promise | void { + const command = new GetUserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Retrieves a list of browser settings.</p>

                                                                      + */ + public listBrowserSettings( + args: ListBrowserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listBrowserSettings( + args: ListBrowserSettingsCommandInput, + cb: (err: any, data?: ListBrowserSettingsCommandOutput) => void + ): void; + public listBrowserSettings( + args: ListBrowserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListBrowserSettingsCommandOutput) => void + ): void; + public listBrowserSettings( + args: ListBrowserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListBrowserSettingsCommandOutput) => void), + cb?: (err: any, data?: ListBrowserSettingsCommandOutput) => void + ): Promise | void { + const command = new ListBrowserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Retrieves a list of identity providers for a specific web portal.</p>
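+   *          <p>A hypothetical sketch (the <code>portalArn</code> and <code>identityProviders</code> member
+   *          names are assumptions, and the ARN is a placeholder):</p>
+   * ```javascript
+   * // Hypothetical sketch; assumes the aggregated WorkSpacesWeb client exported by this package.
+   * import { WorkSpacesWeb } from "@aws-sdk/client-workspaces-web";
+   * const client = new WorkSpacesWeb({ region: "us-east-1" });
+   * const { identityProviders } = await client.listIdentityProviders({ portalArn: "arn:aws:workspaces-web:..." });
+   * console.log(identityProviders);
+   * ```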

                                                                      + */ + public listIdentityProviders( + args: ListIdentityProvidersCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listIdentityProviders( + args: ListIdentityProvidersCommandInput, + cb: (err: any, data?: ListIdentityProvidersCommandOutput) => void + ): void; + public listIdentityProviders( + args: ListIdentityProvidersCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListIdentityProvidersCommandOutput) => void + ): void; + public listIdentityProviders( + args: ListIdentityProvidersCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListIdentityProvidersCommandOutput) => void), + cb?: (err: any, data?: ListIdentityProvidersCommandOutput) => void + ): Promise | void { + const command = new ListIdentityProvidersCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Retrieves a list of network settings.</p>

                                                                      + */ + public listNetworkSettings( + args: ListNetworkSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listNetworkSettings( + args: ListNetworkSettingsCommandInput, + cb: (err: any, data?: ListNetworkSettingsCommandOutput) => void + ): void; + public listNetworkSettings( + args: ListNetworkSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListNetworkSettingsCommandOutput) => void + ): void; + public listNetworkSettings( + args: ListNetworkSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListNetworkSettingsCommandOutput) => void), + cb?: (err: any, data?: ListNetworkSettingsCommandOutput) => void + ): Promise | void { + const command = new ListNetworkSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Retrieves a list of web portals.</p>
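+   *          <p>A hypothetical pagination sketch (the <code>maxResults</code>, <code>nextToken</code>, and
+   *          <code>portals</code> member names are assumptions):</p>
+   * ```javascript
+   * // Hypothetical sketch; assumes the aggregated WorkSpacesWeb client exported by this package.
+   * import { WorkSpacesWeb } from "@aws-sdk/client-workspaces-web";
+   * const client = new WorkSpacesWeb({ region: "us-east-1" });
+   * let nextToken;
+   * do {
+   *   const page = await client.listPortals({ maxResults: 10, nextToken });
+   *   for (const portal of page.portals ?? []) {
+   *     console.log(portal);
+   *   }
+   *   nextToken = page.nextToken;
+   * } while (nextToken);
+   * ```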

                                                                      + */ + public listPortals(args: ListPortalsCommandInput, options?: __HttpHandlerOptions): Promise; + public listPortals(args: ListPortalsCommandInput, cb: (err: any, data?: ListPortalsCommandOutput) => void): void; + public listPortals( + args: ListPortalsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListPortalsCommandOutput) => void + ): void; + public listPortals( + args: ListPortalsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListPortalsCommandOutput) => void), + cb?: (err: any, data?: ListPortalsCommandOutput) => void + ): Promise | void { + const command = new ListPortalsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Retrieves a list of tags for a resource.</p>

                                                                      + */ + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): void; + public listTagsForResource( + args: ListTagsForResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTagsForResourceCommandOutput) => void), + cb?: (err: any, data?: ListTagsForResourceCommandOutput) => void + ): Promise | void { + const command = new ListTagsForResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Retrieves a list of trust store certificates.</p>

                                                                      + */ + public listTrustStoreCertificates( + args: ListTrustStoreCertificatesCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTrustStoreCertificates( + args: ListTrustStoreCertificatesCommandInput, + cb: (err: any, data?: ListTrustStoreCertificatesCommandOutput) => void + ): void; + public listTrustStoreCertificates( + args: ListTrustStoreCertificatesCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTrustStoreCertificatesCommandOutput) => void + ): void; + public listTrustStoreCertificates( + args: ListTrustStoreCertificatesCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTrustStoreCertificatesCommandOutput) => void), + cb?: (err: any, data?: ListTrustStoreCertificatesCommandOutput) => void + ): Promise | void { + const command = new ListTrustStoreCertificatesCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Retrieves a list of trust stores.</p>

                                                                      + */ + public listTrustStores( + args: ListTrustStoresCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listTrustStores( + args: ListTrustStoresCommandInput, + cb: (err: any, data?: ListTrustStoresCommandOutput) => void + ): void; + public listTrustStores( + args: ListTrustStoresCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListTrustStoresCommandOutput) => void + ): void; + public listTrustStores( + args: ListTrustStoresCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListTrustStoresCommandOutput) => void), + cb?: (err: any, data?: ListTrustStoresCommandOutput) => void + ): Promise | void { + const command = new ListTrustStoresCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Retrieves a list of user settings.</p>

                                                                      + */ + public listUserSettings( + args: ListUserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public listUserSettings( + args: ListUserSettingsCommandInput, + cb: (err: any, data?: ListUserSettingsCommandOutput) => void + ): void; + public listUserSettings( + args: ListUserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: ListUserSettingsCommandOutput) => void + ): void; + public listUserSettings( + args: ListUserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: ListUserSettingsCommandOutput) => void), + cb?: (err: any, data?: ListUserSettingsCommandOutput) => void + ): Promise | void { + const command = new ListUserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Adds or overwrites one or more tags for the specified resource.</p>
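+   *          <p>A hypothetical sketch (the <code>resourceArn</code> and <code>tags</code> member names and the
+   *          <code>Key</code>/<code>Value</code> tag shape are assumptions; the ARN is a placeholder):</p>
+   * ```javascript
+   * // Hypothetical sketch; assumes the aggregated WorkSpacesWeb client exported by this package.
+   * import { WorkSpacesWeb } from "@aws-sdk/client-workspaces-web";
+   * const client = new WorkSpacesWeb({ region: "us-east-1" });
+   * await client.tagResource({
+   *   resourceArn: "arn:aws:workspaces-web:...",
+   *   tags: [{ Key: "team", Value: "web-portal" }],
+   * });
+   * ```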

                                                                      + */ + public tagResource(args: TagResourceCommandInput, options?: __HttpHandlerOptions): Promise; + public tagResource(args: TagResourceCommandInput, cb: (err: any, data?: TagResourceCommandOutput) => void): void; + public tagResource( + args: TagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: TagResourceCommandOutput) => void + ): void; + public tagResource( + args: TagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: TagResourceCommandOutput) => void), + cb?: (err: any, data?: TagResourceCommandOutput) => void + ): Promise | void { + const command = new TagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Removes one or more tags from the specified resource.</p>

                                                                      + */ + public untagResource( + args: UntagResourceCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public untagResource( + args: UntagResourceCommandInput, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UntagResourceCommandOutput) => void + ): void; + public untagResource( + args: UntagResourceCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UntagResourceCommandOutput) => void), + cb?: (err: any, data?: UntagResourceCommandOutput) => void + ): Promise | void { + const command = new UntagResourceCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Updates browser settings.</p>

                                                                      + */ + public updateBrowserSettings( + args: UpdateBrowserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateBrowserSettings( + args: UpdateBrowserSettingsCommandInput, + cb: (err: any, data?: UpdateBrowserSettingsCommandOutput) => void + ): void; + public updateBrowserSettings( + args: UpdateBrowserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateBrowserSettingsCommandOutput) => void + ): void; + public updateBrowserSettings( + args: UpdateBrowserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateBrowserSettingsCommandOutput) => void), + cb?: (err: any, data?: UpdateBrowserSettingsCommandOutput) => void + ): Promise | void { + const command = new UpdateBrowserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Updates the identity provider.</p>

                                                                      + */ + public updateIdentityProvider( + args: UpdateIdentityProviderCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateIdentityProvider( + args: UpdateIdentityProviderCommandInput, + cb: (err: any, data?: UpdateIdentityProviderCommandOutput) => void + ): void; + public updateIdentityProvider( + args: UpdateIdentityProviderCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateIdentityProviderCommandOutput) => void + ): void; + public updateIdentityProvider( + args: UpdateIdentityProviderCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateIdentityProviderCommandOutput) => void), + cb?: (err: any, data?: UpdateIdentityProviderCommandOutput) => void + ): Promise | void { + const command = new UpdateIdentityProviderCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Updates network settings.</p>

                                                                      + */ + public updateNetworkSettings( + args: UpdateNetworkSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateNetworkSettings( + args: UpdateNetworkSettingsCommandInput, + cb: (err: any, data?: UpdateNetworkSettingsCommandOutput) => void + ): void; + public updateNetworkSettings( + args: UpdateNetworkSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateNetworkSettingsCommandOutput) => void + ): void; + public updateNetworkSettings( + args: UpdateNetworkSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateNetworkSettingsCommandOutput) => void), + cb?: (err: any, data?: UpdateNetworkSettingsCommandOutput) => void + ): Promise | void { + const command = new UpdateNetworkSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Updates a web portal.</p>

                                                                      + */ + public updatePortal( + args: UpdatePortalCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updatePortal(args: UpdatePortalCommandInput, cb: (err: any, data?: UpdatePortalCommandOutput) => void): void; + public updatePortal( + args: UpdatePortalCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdatePortalCommandOutput) => void + ): void; + public updatePortal( + args: UpdatePortalCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdatePortalCommandOutput) => void), + cb?: (err: any, data?: UpdatePortalCommandOutput) => void + ): Promise | void { + const command = new UpdatePortalCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Updates the trust store.</p>

                                                                      + */ + public updateTrustStore( + args: UpdateTrustStoreCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateTrustStore( + args: UpdateTrustStoreCommandInput, + cb: (err: any, data?: UpdateTrustStoreCommandOutput) => void + ): void; + public updateTrustStore( + args: UpdateTrustStoreCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateTrustStoreCommandOutput) => void + ): void; + public updateTrustStore( + args: UpdateTrustStoreCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateTrustStoreCommandOutput) => void), + cb?: (err: any, data?: UpdateTrustStoreCommandOutput) => void + ): Promise | void { + const command = new UpdateTrustStoreCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } + + /** + *

+   * <p>Updates the user settings.</p>

                                                                      + */ + public updateUserSettings( + args: UpdateUserSettingsCommandInput, + options?: __HttpHandlerOptions + ): Promise; + public updateUserSettings( + args: UpdateUserSettingsCommandInput, + cb: (err: any, data?: UpdateUserSettingsCommandOutput) => void + ): void; + public updateUserSettings( + args: UpdateUserSettingsCommandInput, + options: __HttpHandlerOptions, + cb: (err: any, data?: UpdateUserSettingsCommandOutput) => void + ): void; + public updateUserSettings( + args: UpdateUserSettingsCommandInput, + optionsOrCb?: __HttpHandlerOptions | ((err: any, data?: UpdateUserSettingsCommandOutput) => void), + cb?: (err: any, data?: UpdateUserSettingsCommandOutput) => void + ): Promise | void { + const command = new UpdateUserSettingsCommand(args); + if (typeof optionsOrCb === "function") { + this.send(command, optionsOrCb); + } else if (typeof cb === "function") { + if (typeof optionsOrCb !== "object") throw new Error(`Expect http options but get ${typeof optionsOrCb}`); + this.send(command, optionsOrCb || {}, cb); + } else { + return this.send(command, optionsOrCb); + } + } +} diff --git a/clients/client-workspaces-web/src/WorkSpacesWebClient.ts b/clients/client-workspaces-web/src/WorkSpacesWebClient.ts new file mode 100644 index 000000000000..41cb4a74f35f --- /dev/null +++ b/clients/client-workspaces-web/src/WorkSpacesWebClient.ts @@ -0,0 +1,458 @@ +import { + EndpointsInputConfig, + EndpointsResolvedConfig, + RegionInputConfig, + RegionResolvedConfig, + resolveEndpointsConfig, + resolveRegionConfig, +} from "@aws-sdk/config-resolver"; +import { getContentLengthPlugin } from "@aws-sdk/middleware-content-length"; +import { + getHostHeaderPlugin, + HostHeaderInputConfig, + HostHeaderResolvedConfig, + resolveHostHeaderConfig, +} from "@aws-sdk/middleware-host-header"; +import { getLoggerPlugin } from "@aws-sdk/middleware-logger"; +import { getRetryPlugin, resolveRetryConfig, RetryInputConfig, RetryResolvedConfig } from "@aws-sdk/middleware-retry"; +import { + AwsAuthInputConfig, + AwsAuthResolvedConfig, + getAwsAuthPlugin, + resolveAwsAuthConfig, +} from "@aws-sdk/middleware-signing"; +import { + getUserAgentPlugin, + resolveUserAgentConfig, + UserAgentInputConfig, + UserAgentResolvedConfig, +} from "@aws-sdk/middleware-user-agent"; +import { HttpHandler as __HttpHandler } from "@aws-sdk/protocol-http"; +import { + Client as __Client, + SmithyConfiguration as __SmithyConfiguration, + SmithyResolvedConfiguration as __SmithyResolvedConfiguration, +} from "@aws-sdk/smithy-client"; +import { + Credentials as __Credentials, + Decoder as __Decoder, + Encoder as __Encoder, + Hash as __Hash, + HashConstructor as __HashConstructor, + HttpHandlerOptions as __HttpHandlerOptions, + Logger as __Logger, + Provider as __Provider, + Provider, + RegionInfoProvider, + StreamCollector as __StreamCollector, + UrlParser as __UrlParser, + UserAgent as __UserAgent, +} from "@aws-sdk/types"; + +import { + AssociateBrowserSettingsCommandInput, + AssociateBrowserSettingsCommandOutput, +} from "./commands/AssociateBrowserSettingsCommand"; +import { + AssociateNetworkSettingsCommandInput, + AssociateNetworkSettingsCommandOutput, +} from "./commands/AssociateNetworkSettingsCommand"; +import { + AssociateTrustStoreCommandInput, + AssociateTrustStoreCommandOutput, +} from "./commands/AssociateTrustStoreCommand"; +import { + AssociateUserSettingsCommandInput, + AssociateUserSettingsCommandOutput, +} from "./commands/AssociateUserSettingsCommand"; +import 
{ + CreateBrowserSettingsCommandInput, + CreateBrowserSettingsCommandOutput, +} from "./commands/CreateBrowserSettingsCommand"; +import { + CreateIdentityProviderCommandInput, + CreateIdentityProviderCommandOutput, +} from "./commands/CreateIdentityProviderCommand"; +import { + CreateNetworkSettingsCommandInput, + CreateNetworkSettingsCommandOutput, +} from "./commands/CreateNetworkSettingsCommand"; +import { CreatePortalCommandInput, CreatePortalCommandOutput } from "./commands/CreatePortalCommand"; +import { CreateTrustStoreCommandInput, CreateTrustStoreCommandOutput } from "./commands/CreateTrustStoreCommand"; +import { CreateUserSettingsCommandInput, CreateUserSettingsCommandOutput } from "./commands/CreateUserSettingsCommand"; +import { + DeleteBrowserSettingsCommandInput, + DeleteBrowserSettingsCommandOutput, +} from "./commands/DeleteBrowserSettingsCommand"; +import { + DeleteIdentityProviderCommandInput, + DeleteIdentityProviderCommandOutput, +} from "./commands/DeleteIdentityProviderCommand"; +import { + DeleteNetworkSettingsCommandInput, + DeleteNetworkSettingsCommandOutput, +} from "./commands/DeleteNetworkSettingsCommand"; +import { DeletePortalCommandInput, DeletePortalCommandOutput } from "./commands/DeletePortalCommand"; +import { DeleteTrustStoreCommandInput, DeleteTrustStoreCommandOutput } from "./commands/DeleteTrustStoreCommand"; +import { DeleteUserSettingsCommandInput, DeleteUserSettingsCommandOutput } from "./commands/DeleteUserSettingsCommand"; +import { + DisassociateBrowserSettingsCommandInput, + DisassociateBrowserSettingsCommandOutput, +} from "./commands/DisassociateBrowserSettingsCommand"; +import { + DisassociateNetworkSettingsCommandInput, + DisassociateNetworkSettingsCommandOutput, +} from "./commands/DisassociateNetworkSettingsCommand"; +import { + DisassociateTrustStoreCommandInput, + DisassociateTrustStoreCommandOutput, +} from "./commands/DisassociateTrustStoreCommand"; +import { + DisassociateUserSettingsCommandInput, + DisassociateUserSettingsCommandOutput, +} from "./commands/DisassociateUserSettingsCommand"; +import { GetBrowserSettingsCommandInput, GetBrowserSettingsCommandOutput } from "./commands/GetBrowserSettingsCommand"; +import { + GetIdentityProviderCommandInput, + GetIdentityProviderCommandOutput, +} from "./commands/GetIdentityProviderCommand"; +import { GetNetworkSettingsCommandInput, GetNetworkSettingsCommandOutput } from "./commands/GetNetworkSettingsCommand"; +import { GetPortalCommandInput, GetPortalCommandOutput } from "./commands/GetPortalCommand"; +import { + GetPortalServiceProviderMetadataCommandInput, + GetPortalServiceProviderMetadataCommandOutput, +} from "./commands/GetPortalServiceProviderMetadataCommand"; +import { + GetTrustStoreCertificateCommandInput, + GetTrustStoreCertificateCommandOutput, +} from "./commands/GetTrustStoreCertificateCommand"; +import { GetTrustStoreCommandInput, GetTrustStoreCommandOutput } from "./commands/GetTrustStoreCommand"; +import { GetUserSettingsCommandInput, GetUserSettingsCommandOutput } from "./commands/GetUserSettingsCommand"; +import { + ListBrowserSettingsCommandInput, + ListBrowserSettingsCommandOutput, +} from "./commands/ListBrowserSettingsCommand"; +import { + ListIdentityProvidersCommandInput, + ListIdentityProvidersCommandOutput, +} from "./commands/ListIdentityProvidersCommand"; +import { + ListNetworkSettingsCommandInput, + ListNetworkSettingsCommandOutput, +} from "./commands/ListNetworkSettingsCommand"; +import { ListPortalsCommandInput, ListPortalsCommandOutput } from 
"./commands/ListPortalsCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "./commands/ListTagsForResourceCommand"; +import { + ListTrustStoreCertificatesCommandInput, + ListTrustStoreCertificatesCommandOutput, +} from "./commands/ListTrustStoreCertificatesCommand"; +import { ListTrustStoresCommandInput, ListTrustStoresCommandOutput } from "./commands/ListTrustStoresCommand"; +import { ListUserSettingsCommandInput, ListUserSettingsCommandOutput } from "./commands/ListUserSettingsCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "./commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "./commands/UntagResourceCommand"; +import { + UpdateBrowserSettingsCommandInput, + UpdateBrowserSettingsCommandOutput, +} from "./commands/UpdateBrowserSettingsCommand"; +import { + UpdateIdentityProviderCommandInput, + UpdateIdentityProviderCommandOutput, +} from "./commands/UpdateIdentityProviderCommand"; +import { + UpdateNetworkSettingsCommandInput, + UpdateNetworkSettingsCommandOutput, +} from "./commands/UpdateNetworkSettingsCommand"; +import { UpdatePortalCommandInput, UpdatePortalCommandOutput } from "./commands/UpdatePortalCommand"; +import { UpdateTrustStoreCommandInput, UpdateTrustStoreCommandOutput } from "./commands/UpdateTrustStoreCommand"; +import { UpdateUserSettingsCommandInput, UpdateUserSettingsCommandOutput } from "./commands/UpdateUserSettingsCommand"; +import { getRuntimeConfig as __getRuntimeConfig } from "./runtimeConfig"; + +export type ServiceInputTypes = + | AssociateBrowserSettingsCommandInput + | AssociateNetworkSettingsCommandInput + | AssociateTrustStoreCommandInput + | AssociateUserSettingsCommandInput + | CreateBrowserSettingsCommandInput + | CreateIdentityProviderCommandInput + | CreateNetworkSettingsCommandInput + | CreatePortalCommandInput + | CreateTrustStoreCommandInput + | CreateUserSettingsCommandInput + | DeleteBrowserSettingsCommandInput + | DeleteIdentityProviderCommandInput + | DeleteNetworkSettingsCommandInput + | DeletePortalCommandInput + | DeleteTrustStoreCommandInput + | DeleteUserSettingsCommandInput + | DisassociateBrowserSettingsCommandInput + | DisassociateNetworkSettingsCommandInput + | DisassociateTrustStoreCommandInput + | DisassociateUserSettingsCommandInput + | GetBrowserSettingsCommandInput + | GetIdentityProviderCommandInput + | GetNetworkSettingsCommandInput + | GetPortalCommandInput + | GetPortalServiceProviderMetadataCommandInput + | GetTrustStoreCertificateCommandInput + | GetTrustStoreCommandInput + | GetUserSettingsCommandInput + | ListBrowserSettingsCommandInput + | ListIdentityProvidersCommandInput + | ListNetworkSettingsCommandInput + | ListPortalsCommandInput + | ListTagsForResourceCommandInput + | ListTrustStoreCertificatesCommandInput + | ListTrustStoresCommandInput + | ListUserSettingsCommandInput + | TagResourceCommandInput + | UntagResourceCommandInput + | UpdateBrowserSettingsCommandInput + | UpdateIdentityProviderCommandInput + | UpdateNetworkSettingsCommandInput + | UpdatePortalCommandInput + | UpdateTrustStoreCommandInput + | UpdateUserSettingsCommandInput; + +export type ServiceOutputTypes = + | AssociateBrowserSettingsCommandOutput + | AssociateNetworkSettingsCommandOutput + | AssociateTrustStoreCommandOutput + | AssociateUserSettingsCommandOutput + | CreateBrowserSettingsCommandOutput + | CreateIdentityProviderCommandOutput + | CreateNetworkSettingsCommandOutput + | CreatePortalCommandOutput + | 
CreateTrustStoreCommandOutput + | CreateUserSettingsCommandOutput + | DeleteBrowserSettingsCommandOutput + | DeleteIdentityProviderCommandOutput + | DeleteNetworkSettingsCommandOutput + | DeletePortalCommandOutput + | DeleteTrustStoreCommandOutput + | DeleteUserSettingsCommandOutput + | DisassociateBrowserSettingsCommandOutput + | DisassociateNetworkSettingsCommandOutput + | DisassociateTrustStoreCommandOutput + | DisassociateUserSettingsCommandOutput + | GetBrowserSettingsCommandOutput + | GetIdentityProviderCommandOutput + | GetNetworkSettingsCommandOutput + | GetPortalCommandOutput + | GetPortalServiceProviderMetadataCommandOutput + | GetTrustStoreCertificateCommandOutput + | GetTrustStoreCommandOutput + | GetUserSettingsCommandOutput + | ListBrowserSettingsCommandOutput + | ListIdentityProvidersCommandOutput + | ListNetworkSettingsCommandOutput + | ListPortalsCommandOutput + | ListTagsForResourceCommandOutput + | ListTrustStoreCertificatesCommandOutput + | ListTrustStoresCommandOutput + | ListUserSettingsCommandOutput + | TagResourceCommandOutput + | UntagResourceCommandOutput + | UpdateBrowserSettingsCommandOutput + | UpdateIdentityProviderCommandOutput + | UpdateNetworkSettingsCommandOutput + | UpdatePortalCommandOutput + | UpdateTrustStoreCommandOutput + | UpdateUserSettingsCommandOutput; + +export interface ClientDefaults extends Partial<__SmithyResolvedConfiguration<__HttpHandlerOptions>> { + /** + * The HTTP handler to use. Fetch in browser and Https in Nodejs. + */ + requestHandler?: __HttpHandler; + + /** + * A constructor for a class implementing the {@link __Hash} interface + * that computes the SHA-256 HMAC or checksum of a string or binary buffer. + * @internal + */ + sha256?: __HashConstructor; + + /** + * The function that will be used to convert strings into HTTP endpoints. + * @internal + */ + urlParser?: __UrlParser; + + /** + * A function that can calculate the length of a request body. + * @internal + */ + bodyLengthChecker?: (body: any) => number | undefined; + + /** + * A function that converts a stream into an array of bytes. + * @internal + */ + streamCollector?: __StreamCollector; + + /** + * The function that will be used to convert a base64-encoded string to a byte array. + * @internal + */ + base64Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a base64-encoded string. + * @internal + */ + base64Encoder?: __Encoder; + + /** + * The function that will be used to convert a UTF8-encoded string to a byte array. + * @internal + */ + utf8Decoder?: __Decoder; + + /** + * The function that will be used to convert binary data to a UTF-8 encoded string. + * @internal + */ + utf8Encoder?: __Encoder; + + /** + * The runtime environment. + * @internal + */ + runtime?: string; + + /** + * Disable dyanamically changing the endpoint of the client based on the hostPrefix + * trait of an operation. + */ + disableHostPrefix?: boolean; + + /** + * Value for how many times a request will be made at most in case of retry. + */ + maxAttempts?: number | __Provider; + + /** + * Specifies which retry algorithm to use. + */ + retryMode?: string | __Provider; + + /** + * Optional logger for logging debug/info/warn/error. + */ + logger?: __Logger; + + /** + * Enables IPv6/IPv4 dualstack endpoint. + */ + useDualstackEndpoint?: boolean | __Provider; + + /** + * Enables FIPS compatible endpoints. + */ + useFipsEndpoint?: boolean | __Provider; + + /** + * Unique service identifier. 
+ * @internal + */ + serviceId?: string; + + /** + * The AWS region to which this client will send requests + */ + region?: string | __Provider; + + /** + * Default credentials provider; Not available in browser runtime. + * @internal + */ + credentialDefaultProvider?: (input: any) => __Provider<__Credentials>; + + /** + * Fetch related hostname, signing name or signing region with given region. + * @internal + */ + regionInfoProvider?: RegionInfoProvider; + + /** + * The provider populating default tracking information to be sent with `user-agent`, `x-amz-user-agent` header + * @internal + */ + defaultUserAgentProvider?: Provider<__UserAgent>; +} + +type WorkSpacesWebClientConfigType = Partial<__SmithyConfiguration<__HttpHandlerOptions>> & + ClientDefaults & + RegionInputConfig & + EndpointsInputConfig & + RetryInputConfig & + HostHeaderInputConfig & + AwsAuthInputConfig & + UserAgentInputConfig; +/** + * The configuration interface of WorkSpacesWebClient class constructor that set the region, credentials and other options. + */ +export interface WorkSpacesWebClientConfig extends WorkSpacesWebClientConfigType {} + +type WorkSpacesWebClientResolvedConfigType = __SmithyResolvedConfiguration<__HttpHandlerOptions> & + Required & + RegionResolvedConfig & + EndpointsResolvedConfig & + RetryResolvedConfig & + HostHeaderResolvedConfig & + AwsAuthResolvedConfig & + UserAgentResolvedConfig; +/** + * The resolved configuration interface of WorkSpacesWebClient class. This is resolved and normalized from the {@link WorkSpacesWebClientConfig | constructor configuration interface}. + */ +export interface WorkSpacesWebClientResolvedConfig extends WorkSpacesWebClientResolvedConfigType {} + +/** + *

+ * <p>WorkSpaces Web is a low cost, fully managed WorkSpace built specifically to facilitate
+ *          secure, web-based workloads. WorkSpaces Web makes it easy for customers to safely provide
+ *          their employees with access to internal websites and SaaS web applications without the
+ *          administrative burden of appliances or specialized client software. WorkSpaces Web provides
+ *          simple policy tools tailored for user interactions, while offloading common tasks like
+ *          capacity management, scaling, and maintaining browser images.</p>
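+ *          <p>A minimal construction sketch using the bare-bones client and a command exported by this
+ *          package (the region value is a placeholder):</p>
+ * ```javascript
+ * // Minimal sketch; the client and command names are taken from this package's exports.
+ * import { WorkSpacesWebClient, ListPortalsCommand } from "@aws-sdk/client-workspaces-web";
+ * const client = new WorkSpacesWebClient({ region: "us-east-1" });
+ * const response = await client.send(new ListPortalsCommand({}));
+ * ```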

                                                                      + */ +export class WorkSpacesWebClient extends __Client< + __HttpHandlerOptions, + ServiceInputTypes, + ServiceOutputTypes, + WorkSpacesWebClientResolvedConfig +> { + /** + * The resolved configuration of WorkSpacesWebClient class. This is resolved and normalized from the {@link WorkSpacesWebClientConfig | constructor configuration interface}. + */ + readonly config: WorkSpacesWebClientResolvedConfig; + + constructor(configuration: WorkSpacesWebClientConfig) { + const _config_0 = __getRuntimeConfig(configuration); + const _config_1 = resolveRegionConfig(_config_0); + const _config_2 = resolveEndpointsConfig(_config_1); + const _config_3 = resolveRetryConfig(_config_2); + const _config_4 = resolveHostHeaderConfig(_config_3); + const _config_5 = resolveAwsAuthConfig(_config_4); + const _config_6 = resolveUserAgentConfig(_config_5); + super(_config_6); + this.config = _config_6; + this.middlewareStack.use(getRetryPlugin(this.config)); + this.middlewareStack.use(getContentLengthPlugin(this.config)); + this.middlewareStack.use(getHostHeaderPlugin(this.config)); + this.middlewareStack.use(getLoggerPlugin(this.config)); + this.middlewareStack.use(getAwsAuthPlugin(this.config)); + this.middlewareStack.use(getUserAgentPlugin(this.config)); + } + + /** + * Destroy underlying resources, like sockets. It's usually not necessary to do this. + * However in Node.js, it's best to explicitly shut down the client's agent when it is no longer needed. + * Otherwise, sockets might stay open for quite a long time before the server terminates them. + */ + destroy(): void { + super.destroy(); + } +} diff --git a/clients/client-workspaces-web/src/commands/AssociateBrowserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/AssociateBrowserSettingsCommand.ts new file mode 100644 index 000000000000..9f081434c461 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/AssociateBrowserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AssociateBrowserSettingsRequest, AssociateBrowserSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1AssociateBrowserSettingsCommand, + serializeAws_restJson1AssociateBrowserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface AssociateBrowserSettingsCommandInput extends AssociateBrowserSettingsRequest {} +export interface AssociateBrowserSettingsCommandOutput extends AssociateBrowserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Associates a browser settings resource with a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, AssociateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, AssociateBrowserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new AssociateBrowserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AssociateBrowserSettingsCommandInput} for command's `input` shape. + * @see {@link AssociateBrowserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class AssociateBrowserSettingsCommand extends $Command< + AssociateBrowserSettingsCommandInput, + AssociateBrowserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AssociateBrowserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "AssociateBrowserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AssociateBrowserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: AssociateBrowserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: AssociateBrowserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1AssociateBrowserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1AssociateBrowserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/AssociateNetworkSettingsCommand.ts b/clients/client-workspaces-web/src/commands/AssociateNetworkSettingsCommand.ts new file mode 100644 index 000000000000..8a31a6d85cf2 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/AssociateNetworkSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { 
AssociateNetworkSettingsRequest, AssociateNetworkSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1AssociateNetworkSettingsCommand, + serializeAws_restJson1AssociateNetworkSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface AssociateNetworkSettingsCommandInput extends AssociateNetworkSettingsRequest {} +export interface AssociateNetworkSettingsCommandOutput extends AssociateNetworkSettingsResponse, __MetadataBearer {} + +/** + *

<p>Associates a network settings resource with a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, AssociateNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, AssociateNetworkSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new AssociateNetworkSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AssociateNetworkSettingsCommandInput} for command's `input` shape. + * @see {@link AssociateNetworkSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class AssociateNetworkSettingsCommand extends $Command< + AssociateNetworkSettingsCommandInput, + AssociateNetworkSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AssociateNetworkSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "AssociateNetworkSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AssociateNetworkSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: AssociateNetworkSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: AssociateNetworkSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1AssociateNetworkSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1AssociateNetworkSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/AssociateTrustStoreCommand.ts b/clients/client-workspaces-web/src/commands/AssociateTrustStoreCommand.ts new file mode 100644 index 000000000000..3f7e99115195 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/AssociateTrustStoreCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { 
AssociateTrustStoreRequest, AssociateTrustStoreResponse } from "../models/models_0"; +import { + deserializeAws_restJson1AssociateTrustStoreCommand, + serializeAws_restJson1AssociateTrustStoreCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface AssociateTrustStoreCommandInput extends AssociateTrustStoreRequest {} +export interface AssociateTrustStoreCommandOutput extends AssociateTrustStoreResponse, __MetadataBearer {} + +/** + *

<p>Associates a trust store with a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, AssociateTrustStoreCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, AssociateTrustStoreCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new AssociateTrustStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AssociateTrustStoreCommandInput} for command's `input` shape. + * @see {@link AssociateTrustStoreCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class AssociateTrustStoreCommand extends $Command< + AssociateTrustStoreCommandInput, + AssociateTrustStoreCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AssociateTrustStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "AssociateTrustStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AssociateTrustStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: AssociateTrustStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: AssociateTrustStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1AssociateTrustStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1AssociateTrustStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/AssociateUserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/AssociateUserSettingsCommand.ts new file mode 100644 index 000000000000..c36583452e5d --- /dev/null +++ b/clients/client-workspaces-web/src/commands/AssociateUserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { AssociateUserSettingsRequest, AssociateUserSettingsResponse } from "../models/models_0"; 
+import { + deserializeAws_restJson1AssociateUserSettingsCommand, + serializeAws_restJson1AssociateUserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface AssociateUserSettingsCommandInput extends AssociateUserSettingsRequest {} +export interface AssociateUserSettingsCommandOutput extends AssociateUserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Associates a user settings resource with a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, AssociateUserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, AssociateUserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new AssociateUserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link AssociateUserSettingsCommandInput} for command's `input` shape. + * @see {@link AssociateUserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class AssociateUserSettingsCommand extends $Command< + AssociateUserSettingsCommandInput, + AssociateUserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: AssociateUserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "AssociateUserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: AssociateUserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: AssociateUserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: AssociateUserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1AssociateUserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1AssociateUserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/CreateBrowserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/CreateBrowserSettingsCommand.ts new file mode 100644 index 000000000000..5a7355cedfe2 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/CreateBrowserSettingsCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateBrowserSettingsRequest, 
CreateBrowserSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateBrowserSettingsCommand, + serializeAws_restJson1CreateBrowserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface CreateBrowserSettingsCommandInput extends CreateBrowserSettingsRequest {} +export interface CreateBrowserSettingsCommandOutput extends CreateBrowserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Creates a browser settings resource that can be associated with a web portal. Once + * associated with a web portal, browser settings control how the browser will behave once a + * user starts a streaming session for the web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, CreateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, CreateBrowserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new CreateBrowserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateBrowserSettingsCommandInput} for command's `input` shape. + * @see {@link CreateBrowserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class CreateBrowserSettingsCommand extends $Command< + CreateBrowserSettingsCommandInput, + CreateBrowserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateBrowserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "CreateBrowserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateBrowserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateBrowserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateBrowserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateBrowserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateBrowserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/CreateIdentityProviderCommand.ts b/clients/client-workspaces-web/src/commands/CreateIdentityProviderCommand.ts new file mode 100644 index 000000000000..f9c059d395f2 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/CreateIdentityProviderCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateIdentityProviderRequest, 
CreateIdentityProviderResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateIdentityProviderCommand, + serializeAws_restJson1CreateIdentityProviderCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface CreateIdentityProviderCommandInput extends CreateIdentityProviderRequest {} +export interface CreateIdentityProviderCommandOutput extends CreateIdentityProviderResponse, __MetadataBearer {} + +/** + *

<p>Creates an identity provider resource that is then associated with a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, CreateIdentityProviderCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, CreateIdentityProviderCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new CreateIdentityProviderCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateIdentityProviderCommandInput} for command's `input` shape. + * @see {@link CreateIdentityProviderCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class CreateIdentityProviderCommand extends $Command< + CreateIdentityProviderCommandInput, + CreateIdentityProviderCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateIdentityProviderCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "CreateIdentityProviderCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateIdentityProviderRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateIdentityProviderResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateIdentityProviderCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateIdentityProviderCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateIdentityProviderCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/CreateNetworkSettingsCommand.ts b/clients/client-workspaces-web/src/commands/CreateNetworkSettingsCommand.ts new file mode 100644 index 000000000000..a48ba391d37e --- /dev/null +++ b/clients/client-workspaces-web/src/commands/CreateNetworkSettingsCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateNetworkSettingsRequest, 
CreateNetworkSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateNetworkSettingsCommand, + serializeAws_restJson1CreateNetworkSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface CreateNetworkSettingsCommandInput extends CreateNetworkSettingsRequest {} +export interface CreateNetworkSettingsCommandOutput extends CreateNetworkSettingsResponse, __MetadataBearer {} + +/** + *

<p>Creates a network settings resource that can be associated with a web portal. Once + * associated with a web portal, network settings define how streaming instances will connect + * with your specified VPC.</p>
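A minimal sketch of the workflow this description implies, assuming member names (vpcId, subnetIds, securityGroupIds, networkSettingsArn, portalArn) drawn from the request/response shapes in this diff; it is not part of the generated patch.

```ts
import {
  AssociateNetworkSettingsCommand,
  CreateNetworkSettingsCommand,
  WorkSpacesWebClient,
} from "@aws-sdk/client-workspaces-web";

async function attachVpcExample(portalArn: string): Promise<void> {
  const client = new WorkSpacesWebClient({ region: "us-east-1" });
  // Create network settings pointing at a VPC, then associate them with an existing portal.
  const { networkSettingsArn } = await client.send(
    new CreateNetworkSettingsCommand({
      vpcId: "vpc-0123456789abcdef0",
      subnetIds: ["subnet-aaaa1111", "subnet-bbbb2222"],
      securityGroupIds: ["sg-0123456789abcdef0"],
    })
  );
  await client.send(new AssociateNetworkSettingsCommand({ portalArn, networkSettingsArn }));
  client.destroy();
}
```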

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, CreateNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, CreateNetworkSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new CreateNetworkSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateNetworkSettingsCommandInput} for command's `input` shape. + * @see {@link CreateNetworkSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class CreateNetworkSettingsCommand extends $Command< + CreateNetworkSettingsCommandInput, + CreateNetworkSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateNetworkSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "CreateNetworkSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateNetworkSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateNetworkSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateNetworkSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateNetworkSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateNetworkSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/CreatePortalCommand.ts b/clients/client-workspaces-web/src/commands/CreatePortalCommand.ts new file mode 100644 index 000000000000..9e9cb7019308 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/CreatePortalCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreatePortalRequest, CreatePortalResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1CreatePortalCommand, + serializeAws_restJson1CreatePortalCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface CreatePortalCommandInput extends CreatePortalRequest {} +export interface CreatePortalCommandOutput extends CreatePortalResponse, __MetadataBearer {} + +/** + *

<p>Creates a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, CreatePortalCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, CreatePortalCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new CreatePortalCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreatePortalCommandInput} for command's `input` shape. + * @see {@link CreatePortalCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class CreatePortalCommand extends $Command< + CreatePortalCommandInput, + CreatePortalCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreatePortalCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "CreatePortalCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreatePortalRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreatePortalResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreatePortalCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreatePortalCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreatePortalCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/CreateTrustStoreCommand.ts b/clients/client-workspaces-web/src/commands/CreateTrustStoreCommand.ts new file mode 100644 index 000000000000..0af62577ac9e --- /dev/null +++ b/clients/client-workspaces-web/src/commands/CreateTrustStoreCommand.ts @@ -0,0 +1,99 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateTrustStoreRequest, CreateTrustStoreResponse } from "../models/models_0"; +import { + deserializeAws_restJson1CreateTrustStoreCommand, + serializeAws_restJson1CreateTrustStoreCommand, +} from 
"../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface CreateTrustStoreCommandInput extends CreateTrustStoreRequest {} +export interface CreateTrustStoreCommandOutput extends CreateTrustStoreResponse, __MetadataBearer {} + +/** + *

<p>Creates a trust store that can be associated with a web portal. A trust store contains + * certificate authority (CA) certificates. Once associated with a web portal, the browser in + * a streaming session will recognize certificates that have been issued using any of the CAs + * in the trust store. If your organization has internal websites that use certificates issued + * by private CAs, you should add the private CA certificate to the trust store.</p>
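A hedged sketch of adding a private CA certificate as described above, not part of the generated patch; the certificateList, trustStoreArn, and portalArn member names are assumptions based on the request/response shapes in this diff, and the file path is illustrative.

```ts
import { readFile } from "fs/promises";
import {
  AssociateTrustStoreCommand,
  CreateTrustStoreCommand,
  WorkSpacesWebClient,
} from "@aws-sdk/client-workspaces-web";

async function addPrivateCaExample(portalArn: string, caCertPath: string): Promise<void> {
  const client = new WorkSpacesWebClient({ region: "us-east-1" });
  // Each entry in certificateList is the raw bytes of one CA certificate.
  const certificate = await readFile(caCertPath);
  const { trustStoreArn } = await client.send(
    new CreateTrustStoreCommand({ certificateList: [certificate] })
  );
  await client.send(new AssociateTrustStoreCommand({ portalArn, trustStoreArn }));
  client.destroy();
}
```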

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, CreateTrustStoreCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, CreateTrustStoreCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new CreateTrustStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateTrustStoreCommandInput} for command's `input` shape. + * @see {@link CreateTrustStoreCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class CreateTrustStoreCommand extends $Command< + CreateTrustStoreCommandInput, + CreateTrustStoreCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateTrustStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "CreateTrustStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateTrustStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateTrustStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateTrustStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateTrustStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateTrustStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/CreateUserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/CreateUserSettingsCommand.ts new file mode 100644 index 000000000000..ae18d4d370d4 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/CreateUserSettingsCommand.ts @@ -0,0 +1,97 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { CreateUserSettingsRequest, CreateUserSettingsResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1CreateUserSettingsCommand, + serializeAws_restJson1CreateUserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface CreateUserSettingsCommandInput extends CreateUserSettingsRequest {} +export interface CreateUserSettingsCommandOutput extends CreateUserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Creates a user settings resource that can be associated with a web portal. Once + * associated with a web portal, user settings control how users can transfer data between a + * streaming session and their local devices.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, CreateUserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, CreateUserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new CreateUserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link CreateUserSettingsCommandInput} for command's `input` shape. + * @see {@link CreateUserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class CreateUserSettingsCommand extends $Command< + CreateUserSettingsCommandInput, + CreateUserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: CreateUserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "CreateUserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: CreateUserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: CreateUserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: CreateUserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1CreateUserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1CreateUserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DeleteBrowserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/DeleteBrowserSettingsCommand.ts new file mode 100644 index 000000000000..513774c8e252 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DeleteBrowserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteBrowserSettingsRequest, DeleteBrowserSettingsResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1DeleteBrowserSettingsCommand, + serializeAws_restJson1DeleteBrowserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DeleteBrowserSettingsCommandInput extends DeleteBrowserSettingsRequest {} +export interface DeleteBrowserSettingsCommandOutput extends DeleteBrowserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Deletes browser settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DeleteBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DeleteBrowserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DeleteBrowserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteBrowserSettingsCommandInput} for command's `input` shape. + * @see {@link DeleteBrowserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DeleteBrowserSettingsCommand extends $Command< + DeleteBrowserSettingsCommandInput, + DeleteBrowserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteBrowserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DeleteBrowserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteBrowserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteBrowserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteBrowserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteBrowserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteBrowserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DeleteIdentityProviderCommand.ts b/clients/client-workspaces-web/src/commands/DeleteIdentityProviderCommand.ts new file mode 100644 index 000000000000..fae1e59ad1c4 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DeleteIdentityProviderCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteIdentityProviderRequest, 
DeleteIdentityProviderResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteIdentityProviderCommand, + serializeAws_restJson1DeleteIdentityProviderCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DeleteIdentityProviderCommandInput extends DeleteIdentityProviderRequest {} +export interface DeleteIdentityProviderCommandOutput extends DeleteIdentityProviderResponse, __MetadataBearer {} + +/** + *

<p>Deletes the identity provider.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DeleteIdentityProviderCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DeleteIdentityProviderCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DeleteIdentityProviderCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteIdentityProviderCommandInput} for command's `input` shape. + * @see {@link DeleteIdentityProviderCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DeleteIdentityProviderCommand extends $Command< + DeleteIdentityProviderCommandInput, + DeleteIdentityProviderCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteIdentityProviderCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DeleteIdentityProviderCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteIdentityProviderRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteIdentityProviderResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteIdentityProviderCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteIdentityProviderCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteIdentityProviderCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DeleteNetworkSettingsCommand.ts b/clients/client-workspaces-web/src/commands/DeleteNetworkSettingsCommand.ts new file mode 100644 index 000000000000..458b7b90ea04 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DeleteNetworkSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteNetworkSettingsRequest, 
DeleteNetworkSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteNetworkSettingsCommand, + serializeAws_restJson1DeleteNetworkSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DeleteNetworkSettingsCommandInput extends DeleteNetworkSettingsRequest {} +export interface DeleteNetworkSettingsCommandOutput extends DeleteNetworkSettingsResponse, __MetadataBearer {} + +/** + *

<p>Deletes network settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DeleteNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DeleteNetworkSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DeleteNetworkSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteNetworkSettingsCommandInput} for command's `input` shape. + * @see {@link DeleteNetworkSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DeleteNetworkSettingsCommand extends $Command< + DeleteNetworkSettingsCommandInput, + DeleteNetworkSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteNetworkSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DeleteNetworkSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteNetworkSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteNetworkSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteNetworkSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteNetworkSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteNetworkSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DeletePortalCommand.ts b/clients/client-workspaces-web/src/commands/DeletePortalCommand.ts new file mode 100644 index 000000000000..a223805ba919 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DeletePortalCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeletePortalRequest, DeletePortalResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1DeletePortalCommand, + serializeAws_restJson1DeletePortalCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DeletePortalCommandInput extends DeletePortalRequest {} +export interface DeletePortalCommandOutput extends DeletePortalResponse, __MetadataBearer {} + +/** + *

<p>Deletes a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DeletePortalCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DeletePortalCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DeletePortalCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeletePortalCommandInput} for command's `input` shape. + * @see {@link DeletePortalCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DeletePortalCommand extends $Command< + DeletePortalCommandInput, + DeletePortalCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeletePortalCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DeletePortalCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeletePortalRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeletePortalResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeletePortalCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeletePortalCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeletePortalCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DeleteTrustStoreCommand.ts b/clients/client-workspaces-web/src/commands/DeleteTrustStoreCommand.ts new file mode 100644 index 000000000000..246d8a380940 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DeleteTrustStoreCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteTrustStoreRequest, DeleteTrustStoreResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DeleteTrustStoreCommand, + serializeAws_restJson1DeleteTrustStoreCommand, +} from 
"../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DeleteTrustStoreCommandInput extends DeleteTrustStoreRequest {} +export interface DeleteTrustStoreCommandOutput extends DeleteTrustStoreResponse, __MetadataBearer {} + +/** + *

<p>Deletes the trust store.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DeleteTrustStoreCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DeleteTrustStoreCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DeleteTrustStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteTrustStoreCommandInput} for command's `input` shape. + * @see {@link DeleteTrustStoreCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DeleteTrustStoreCommand extends $Command< + DeleteTrustStoreCommandInput, + DeleteTrustStoreCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteTrustStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DeleteTrustStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteTrustStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteTrustStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteTrustStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteTrustStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteTrustStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DeleteUserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/DeleteUserSettingsCommand.ts new file mode 100644 index 000000000000..6d19e69fa1f7 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DeleteUserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DeleteUserSettingsRequest, DeleteUserSettingsResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1DeleteUserSettingsCommand, + serializeAws_restJson1DeleteUserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DeleteUserSettingsCommandInput extends DeleteUserSettingsRequest {} +export interface DeleteUserSettingsCommandOutput extends DeleteUserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Deletes user settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DeleteUserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DeleteUserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DeleteUserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DeleteUserSettingsCommandInput} for command's `input` shape. + * @see {@link DeleteUserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DeleteUserSettingsCommand extends $Command< + DeleteUserSettingsCommandInput, + DeleteUserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DeleteUserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DeleteUserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DeleteUserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DeleteUserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DeleteUserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DeleteUserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DeleteUserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DisassociateBrowserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/DisassociateBrowserSettingsCommand.ts new file mode 100644 index 000000000000..7238a09fbd12 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DisassociateBrowserSettingsCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DisassociateBrowserSettingsRequest, DisassociateBrowserSettingsResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1DisassociateBrowserSettingsCommand, + serializeAws_restJson1DisassociateBrowserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DisassociateBrowserSettingsCommandInput extends DisassociateBrowserSettingsRequest {} +export interface DisassociateBrowserSettingsCommandOutput + extends DisassociateBrowserSettingsResponse, + __MetadataBearer {} + +/** + *

<p>Disassociates browser settings from a web portal.</p>
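A minimal call sketch, assuming the request is keyed by the portal's ARN (the `portalArn` field name is an assumption); disassociating detaches the settings from the portal without deleting the settings resource.

```javascript
// Minimal sketch, assuming DisassociateBrowserSettings is keyed by the portal ARN.
import { WorkSpacesWebClient, DisassociateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({});
await client.send(
  new DisassociateBrowserSettingsCommand({
    portalArn: "arn:aws:workspaces-web:us-east-1:111122223333:portal/EXAMPLE", // placeholder ARN
  })
);
// The browser settings resource itself remains and can be re-associated later.
```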

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DisassociateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DisassociateBrowserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DisassociateBrowserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisassociateBrowserSettingsCommandInput} for command's `input` shape. + * @see {@link DisassociateBrowserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DisassociateBrowserSettingsCommand extends $Command< + DisassociateBrowserSettingsCommandInput, + DisassociateBrowserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisassociateBrowserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DisassociateBrowserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisassociateBrowserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisassociateBrowserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisassociateBrowserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DisassociateBrowserSettingsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1DisassociateBrowserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DisassociateNetworkSettingsCommand.ts b/clients/client-workspaces-web/src/commands/DisassociateNetworkSettingsCommand.ts new file mode 100644 index 000000000000..26e6d0c1978c --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DisassociateNetworkSettingsCommand.ts @@ -0,0 +1,100 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + 
SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DisassociateNetworkSettingsRequest, DisassociateNetworkSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DisassociateNetworkSettingsCommand, + serializeAws_restJson1DisassociateNetworkSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DisassociateNetworkSettingsCommandInput extends DisassociateNetworkSettingsRequest {} +export interface DisassociateNetworkSettingsCommandOutput + extends DisassociateNetworkSettingsResponse, + __MetadataBearer {} + +/** + *

<p>Disassociates network settings from a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DisassociateNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DisassociateNetworkSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DisassociateNetworkSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisassociateNetworkSettingsCommandInput} for command's `input` shape. + * @see {@link DisassociateNetworkSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DisassociateNetworkSettingsCommand extends $Command< + DisassociateNetworkSettingsCommandInput, + DisassociateNetworkSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisassociateNetworkSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DisassociateNetworkSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisassociateNetworkSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisassociateNetworkSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisassociateNetworkSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DisassociateNetworkSettingsCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1DisassociateNetworkSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DisassociateTrustStoreCommand.ts b/clients/client-workspaces-web/src/commands/DisassociateTrustStoreCommand.ts new file mode 100644 index 000000000000..3c83ecb22212 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DisassociateTrustStoreCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as 
__SerdeContext, +} from "@aws-sdk/types"; + +import { DisassociateTrustStoreRequest, DisassociateTrustStoreResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DisassociateTrustStoreCommand, + serializeAws_restJson1DisassociateTrustStoreCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DisassociateTrustStoreCommandInput extends DisassociateTrustStoreRequest {} +export interface DisassociateTrustStoreCommandOutput extends DisassociateTrustStoreResponse, __MetadataBearer {} + +/** + *

<p>Disassociates a trust store from a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DisassociateTrustStoreCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DisassociateTrustStoreCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DisassociateTrustStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisassociateTrustStoreCommandInput} for command's `input` shape. + * @see {@link DisassociateTrustStoreCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DisassociateTrustStoreCommand extends $Command< + DisassociateTrustStoreCommandInput, + DisassociateTrustStoreCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisassociateTrustStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DisassociateTrustStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisassociateTrustStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisassociateTrustStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisassociateTrustStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DisassociateTrustStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DisassociateTrustStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/DisassociateUserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/DisassociateUserSettingsCommand.ts new file mode 100644 index 000000000000..f6218e73906a --- /dev/null +++ b/clients/client-workspaces-web/src/commands/DisassociateUserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { DisassociateUserSettingsRequest, 
DisassociateUserSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1DisassociateUserSettingsCommand, + serializeAws_restJson1DisassociateUserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface DisassociateUserSettingsCommandInput extends DisassociateUserSettingsRequest {} +export interface DisassociateUserSettingsCommandOutput extends DisassociateUserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Disassociates user settings from a web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, DisassociateUserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, DisassociateUserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new DisassociateUserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link DisassociateUserSettingsCommandInput} for command's `input` shape. + * @see {@link DisassociateUserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class DisassociateUserSettingsCommand extends $Command< + DisassociateUserSettingsCommandInput, + DisassociateUserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: DisassociateUserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "DisassociateUserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: DisassociateUserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: DisassociateUserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: DisassociateUserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1DisassociateUserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1DisassociateUserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/GetBrowserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/GetBrowserSettingsCommand.ts new file mode 100644 index 000000000000..9d69f36bda59 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/GetBrowserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { 
GetBrowserSettingsRequest, GetBrowserSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetBrowserSettingsCommand, + serializeAws_restJson1GetBrowserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface GetBrowserSettingsCommandInput extends GetBrowserSettingsRequest {} +export interface GetBrowserSettingsCommandOutput extends GetBrowserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Gets browser settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, GetBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, GetBrowserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new GetBrowserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetBrowserSettingsCommandInput} for command's `input` shape. + * @see {@link GetBrowserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class GetBrowserSettingsCommand extends $Command< + GetBrowserSettingsCommandInput, + GetBrowserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetBrowserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "GetBrowserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetBrowserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetBrowserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetBrowserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetBrowserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetBrowserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/GetIdentityProviderCommand.ts b/clients/client-workspaces-web/src/commands/GetIdentityProviderCommand.ts new file mode 100644 index 000000000000..48a09ccf24bb --- /dev/null +++ b/clients/client-workspaces-web/src/commands/GetIdentityProviderCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetIdentityProviderRequest, GetIdentityProviderResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1GetIdentityProviderCommand, + serializeAws_restJson1GetIdentityProviderCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface GetIdentityProviderCommandInput extends GetIdentityProviderRequest {} +export interface GetIdentityProviderCommandOutput extends GetIdentityProviderResponse, __MetadataBearer {} + +/** + *

<p>Gets the identity provider.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, GetIdentityProviderCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, GetIdentityProviderCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new GetIdentityProviderCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetIdentityProviderCommandInput} for command's `input` shape. + * @see {@link GetIdentityProviderCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class GetIdentityProviderCommand extends $Command< + GetIdentityProviderCommandInput, + GetIdentityProviderCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetIdentityProviderCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "GetIdentityProviderCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetIdentityProviderRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetIdentityProviderResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetIdentityProviderCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetIdentityProviderCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetIdentityProviderCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/GetNetworkSettingsCommand.ts b/clients/client-workspaces-web/src/commands/GetNetworkSettingsCommand.ts new file mode 100644 index 000000000000..1c78234eec16 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/GetNetworkSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetNetworkSettingsRequest, GetNetworkSettingsResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1GetNetworkSettingsCommand, + serializeAws_restJson1GetNetworkSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface GetNetworkSettingsCommandInput extends GetNetworkSettingsRequest {} +export interface GetNetworkSettingsCommandOutput extends GetNetworkSettingsResponse, __MetadataBearer {} + +/** + *

<p>Gets the network settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, GetNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, GetNetworkSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new GetNetworkSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetNetworkSettingsCommandInput} for command's `input` shape. + * @see {@link GetNetworkSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class GetNetworkSettingsCommand extends $Command< + GetNetworkSettingsCommandInput, + GetNetworkSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetNetworkSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "GetNetworkSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetNetworkSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetNetworkSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetNetworkSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetNetworkSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetNetworkSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/GetPortalCommand.ts b/clients/client-workspaces-web/src/commands/GetPortalCommand.ts new file mode 100644 index 000000000000..8bc5953eb1b9 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/GetPortalCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetPortalRequest, GetPortalResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetPortalCommand, + 
serializeAws_restJson1GetPortalCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface GetPortalCommandInput extends GetPortalRequest {} +export interface GetPortalCommandOutput extends GetPortalResponse, __MetadataBearer {} + +/** + *

<p>Gets the web portal.</p>
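A short sketch of reading the result; the `portalArn` request field and the `portal` response member are assumed from the command name, not confirmed by this diff.

```javascript
// Sketch only: the field names (portalArn, portal) are assumptions, not taken from this diff.
import { WorkSpacesWebClient, GetPortalCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({});
const { portal } = await client.send(
  new GetPortalCommand({ portalArn: "arn:aws:workspaces-web:us-east-1:111122223333:portal/EXAMPLE" })
);
// Log whatever the service returned for the portal; the exact members depend on the service model.
console.log(JSON.stringify(portal, null, 2));
```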

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, GetPortalCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, GetPortalCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new GetPortalCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetPortalCommandInput} for command's `input` shape. + * @see {@link GetPortalCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class GetPortalCommand extends $Command< + GetPortalCommandInput, + GetPortalCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetPortalCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "GetPortalCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetPortalRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetPortalResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetPortalCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetPortalCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetPortalCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/GetPortalServiceProviderMetadataCommand.ts b/clients/client-workspaces-web/src/commands/GetPortalServiceProviderMetadataCommand.ts new file mode 100644 index 000000000000..7e967aefce63 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/GetPortalServiceProviderMetadataCommand.ts @@ -0,0 +1,103 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetPortalServiceProviderMetadataRequest, GetPortalServiceProviderMetadataResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetPortalServiceProviderMetadataCommand, + 
serializeAws_restJson1GetPortalServiceProviderMetadataCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface GetPortalServiceProviderMetadataCommandInput extends GetPortalServiceProviderMetadataRequest {} +export interface GetPortalServiceProviderMetadataCommandOutput + extends GetPortalServiceProviderMetadataResponse, + __MetadataBearer {} + +/** + *

<p>Gets the service provider metadata.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, GetPortalServiceProviderMetadataCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, GetPortalServiceProviderMetadataCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new GetPortalServiceProviderMetadataCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetPortalServiceProviderMetadataCommandInput} for command's `input` shape. + * @see {@link GetPortalServiceProviderMetadataCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class GetPortalServiceProviderMetadataCommand extends $Command< + GetPortalServiceProviderMetadataCommandInput, + GetPortalServiceProviderMetadataCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetPortalServiceProviderMetadataCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "GetPortalServiceProviderMetadataCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetPortalServiceProviderMetadataRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetPortalServiceProviderMetadataResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize( + input: GetPortalServiceProviderMetadataCommandInput, + context: __SerdeContext + ): Promise<__HttpRequest> { + return serializeAws_restJson1GetPortalServiceProviderMetadataCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1GetPortalServiceProviderMetadataCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/GetTrustStoreCertificateCommand.ts b/clients/client-workspaces-web/src/commands/GetTrustStoreCertificateCommand.ts new file mode 100644 index 000000000000..6efb7992cc88 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/GetTrustStoreCertificateCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as 
__HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetTrustStoreCertificateRequest, GetTrustStoreCertificateResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetTrustStoreCertificateCommand, + serializeAws_restJson1GetTrustStoreCertificateCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface GetTrustStoreCertificateCommandInput extends GetTrustStoreCertificateRequest {} +export interface GetTrustStoreCertificateCommandOutput extends GetTrustStoreCertificateResponse, __MetadataBearer {} + +/** + *

<p>Gets the trust store certificate.</p>
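A sketch of looking up a single certificate, under the assumption that the request takes a trust store ARN plus the certificate's thumbprint (both field names are assumptions for illustration).

```javascript
// Assumed request shape: a trust store ARN plus the thumbprint of the certificate to fetch.
import { WorkSpacesWebClient, GetTrustStoreCertificateCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({});
const response = await client.send(
  new GetTrustStoreCertificateCommand({
    trustStoreArn: "arn:aws:workspaces-web:us-east-1:111122223333:trustStore/EXAMPLE", // placeholder
    thumbprint: "EXAMPLETHUMBPRINT", // placeholder; use the certificate's actual thumbprint
  })
);
console.log(response);
```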

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, GetTrustStoreCertificateCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, GetTrustStoreCertificateCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new GetTrustStoreCertificateCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetTrustStoreCertificateCommandInput} for command's `input` shape. + * @see {@link GetTrustStoreCertificateCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class GetTrustStoreCertificateCommand extends $Command< + GetTrustStoreCertificateCommandInput, + GetTrustStoreCertificateCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetTrustStoreCertificateCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "GetTrustStoreCertificateCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetTrustStoreCertificateRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetTrustStoreCertificateResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetTrustStoreCertificateCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetTrustStoreCertificateCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetTrustStoreCertificateCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/GetTrustStoreCommand.ts b/clients/client-workspaces-web/src/commands/GetTrustStoreCommand.ts new file mode 100644 index 000000000000..2b36473a8611 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/GetTrustStoreCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetTrustStoreRequest, 
GetTrustStoreResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetTrustStoreCommand, + serializeAws_restJson1GetTrustStoreCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface GetTrustStoreCommandInput extends GetTrustStoreRequest {} +export interface GetTrustStoreCommandOutput extends GetTrustStoreResponse, __MetadataBearer {} + +/** + *

<p>Gets the trust store.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, GetTrustStoreCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, GetTrustStoreCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new GetTrustStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetTrustStoreCommandInput} for command's `input` shape. + * @see {@link GetTrustStoreCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class GetTrustStoreCommand extends $Command< + GetTrustStoreCommandInput, + GetTrustStoreCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetTrustStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "GetTrustStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetTrustStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetTrustStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetTrustStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetTrustStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetTrustStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/GetUserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/GetUserSettingsCommand.ts new file mode 100644 index 000000000000..f25e422a7e6b --- /dev/null +++ b/clients/client-workspaces-web/src/commands/GetUserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { GetUserSettingsRequest, GetUserSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1GetUserSettingsCommand, + serializeAws_restJson1GetUserSettingsCommand, +} from 
"../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface GetUserSettingsCommandInput extends GetUserSettingsRequest {} +export interface GetUserSettingsCommandOutput extends GetUserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Gets user settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, GetUserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, GetUserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new GetUserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link GetUserSettingsCommandInput} for command's `input` shape. + * @see {@link GetUserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class GetUserSettingsCommand extends $Command< + GetUserSettingsCommandInput, + GetUserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: GetUserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "GetUserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: GetUserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: GetUserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: GetUserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1GetUserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1GetUserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/ListBrowserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/ListBrowserSettingsCommand.ts new file mode 100644 index 000000000000..fe2bd5dc2378 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/ListBrowserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListBrowserSettingsRequest, ListBrowserSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListBrowserSettingsCommand, + 
serializeAws_restJson1ListBrowserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface ListBrowserSettingsCommandInput extends ListBrowserSettingsRequest {} +export interface ListBrowserSettingsCommandOutput extends ListBrowserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Retrieves a list of browser settings.</p>
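The List* commands in this client appear to follow the SDK's usual token-based pagination. A minimal manual-pagination sketch, assuming `maxResults`/`nextToken` on the request and `browserSettings` plus `nextToken` on the response (assumed names; the generated paginators under `src/pagination`, if present for this client, are the supported route). The same pattern applies to the other List* commands added in this file set:

```javascript
import { WorkSpacesWebClient, ListBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

// Manual pagination sketch; `maxResults`, `nextToken`, `browserSettings`,
// and `browserSettingsArn` are assumed field names.
let nextToken = undefined;
do {
  const page = await client.send(
    new ListBrowserSettingsCommand({ maxResults: 10, nextToken })
  );
  for (const settings of page.browserSettings ?? []) {
    console.log(settings.browserSettingsArn); // assumed field
  }
  nextToken = page.nextToken;
} while (nextToken);
```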

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, ListBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, ListBrowserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new ListBrowserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListBrowserSettingsCommandInput} for command's `input` shape. + * @see {@link ListBrowserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class ListBrowserSettingsCommand extends $Command< + ListBrowserSettingsCommandInput, + ListBrowserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListBrowserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "ListBrowserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListBrowserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListBrowserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListBrowserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListBrowserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListBrowserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/ListIdentityProvidersCommand.ts b/clients/client-workspaces-web/src/commands/ListIdentityProvidersCommand.ts new file mode 100644 index 000000000000..367f85acc85b --- /dev/null +++ b/clients/client-workspaces-web/src/commands/ListIdentityProvidersCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListIdentityProvidersRequest, ListIdentityProvidersResponse } from "../models/models_0"; 
+import { + deserializeAws_restJson1ListIdentityProvidersCommand, + serializeAws_restJson1ListIdentityProvidersCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface ListIdentityProvidersCommandInput extends ListIdentityProvidersRequest {} +export interface ListIdentityProvidersCommandOutput extends ListIdentityProvidersResponse, __MetadataBearer {} + +/** + *

<p>Retrieves a list of identity providers for a specific web portal.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, ListIdentityProvidersCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, ListIdentityProvidersCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new ListIdentityProvidersCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListIdentityProvidersCommandInput} for command's `input` shape. + * @see {@link ListIdentityProvidersCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class ListIdentityProvidersCommand extends $Command< + ListIdentityProvidersCommandInput, + ListIdentityProvidersCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListIdentityProvidersCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "ListIdentityProvidersCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListIdentityProvidersRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListIdentityProvidersResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListIdentityProvidersCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListIdentityProvidersCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListIdentityProvidersCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/ListNetworkSettingsCommand.ts b/clients/client-workspaces-web/src/commands/ListNetworkSettingsCommand.ts new file mode 100644 index 000000000000..2e6fdac43c72 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/ListNetworkSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListNetworkSettingsRequest, ListNetworkSettingsResponse } from 
"../models/models_0"; +import { + deserializeAws_restJson1ListNetworkSettingsCommand, + serializeAws_restJson1ListNetworkSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface ListNetworkSettingsCommandInput extends ListNetworkSettingsRequest {} +export interface ListNetworkSettingsCommandOutput extends ListNetworkSettingsResponse, __MetadataBearer {} + +/** + *

<p>Retrieves a list of network settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, ListNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, ListNetworkSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new ListNetworkSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListNetworkSettingsCommandInput} for command's `input` shape. + * @see {@link ListNetworkSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class ListNetworkSettingsCommand extends $Command< + ListNetworkSettingsCommandInput, + ListNetworkSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListNetworkSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "ListNetworkSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListNetworkSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListNetworkSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListNetworkSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListNetworkSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListNetworkSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/ListPortalsCommand.ts b/clients/client-workspaces-web/src/commands/ListPortalsCommand.ts new file mode 100644 index 000000000000..fc0ba06e2288 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/ListPortalsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListPortalsRequest, ListPortalsResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1ListPortalsCommand, + serializeAws_restJson1ListPortalsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface ListPortalsCommandInput extends ListPortalsRequest {} +export interface ListPortalsCommandOutput extends ListPortalsResponse, __MetadataBearer {} + +/** + *

<p>Retrieves a list of web portals.</p>
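As a sketch of how the portal listing might be consumed, assuming the response exposes a `portals` array whose items carry a `portalArn` (assumed names, for illustration only):

```javascript
import { WorkSpacesWebClient, ListPortalsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

// `portals` and `portalArn` are assumed field names; confirm against
// ListPortalsResponse in models_0.
const { portals = [] } = await client.send(new ListPortalsCommand({}));
for (const portal of portals) {
  console.log(portal.portalArn);
}
```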

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, ListPortalsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, ListPortalsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new ListPortalsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListPortalsCommandInput} for command's `input` shape. + * @see {@link ListPortalsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class ListPortalsCommand extends $Command< + ListPortalsCommandInput, + ListPortalsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListPortalsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "ListPortalsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListPortalsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListPortalsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListPortalsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListPortalsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListPortalsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/ListTagsForResourceCommand.ts b/clients/client-workspaces-web/src/commands/ListTagsForResourceCommand.ts new file mode 100644 index 000000000000..54e9b5819135 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/ListTagsForResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListTagsForResourceRequest, ListTagsForResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTagsForResourceCommand, + serializeAws_restJson1ListTagsForResourceCommand, +} from 
"../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface ListTagsForResourceCommandInput extends ListTagsForResourceRequest {} +export interface ListTagsForResourceCommandOutput extends ListTagsForResourceResponse, __MetadataBearer {} + +/** + *

<p>Retrieves a list of tags for a resource.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, ListTagsForResourceCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, ListTagsForResourceCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new ListTagsForResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTagsForResourceCommandInput} for command's `input` shape. + * @see {@link ListTagsForResourceCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class ListTagsForResourceCommand extends $Command< + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTagsForResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "ListTagsForResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTagsForResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTagsForResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTagsForResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTagsForResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListTagsForResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/ListTrustStoreCertificatesCommand.ts b/clients/client-workspaces-web/src/commands/ListTrustStoreCertificatesCommand.ts new file mode 100644 index 000000000000..2ecff6590288 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/ListTrustStoreCertificatesCommand.ts @@ -0,0 +1,98 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListTrustStoreCertificatesRequest, ListTrustStoreCertificatesResponse } 
from "../models/models_0"; +import { + deserializeAws_restJson1ListTrustStoreCertificatesCommand, + serializeAws_restJson1ListTrustStoreCertificatesCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface ListTrustStoreCertificatesCommandInput extends ListTrustStoreCertificatesRequest {} +export interface ListTrustStoreCertificatesCommandOutput extends ListTrustStoreCertificatesResponse, __MetadataBearer {} + +/** + *

<p>Retrieves a list of trust store certificates.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, ListTrustStoreCertificatesCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, ListTrustStoreCertificatesCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new ListTrustStoreCertificatesCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTrustStoreCertificatesCommandInput} for command's `input` shape. + * @see {@link ListTrustStoreCertificatesCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class ListTrustStoreCertificatesCommand extends $Command< + ListTrustStoreCertificatesCommandInput, + ListTrustStoreCertificatesCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTrustStoreCertificatesCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "ListTrustStoreCertificatesCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTrustStoreCertificatesRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTrustStoreCertificatesResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTrustStoreCertificatesCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTrustStoreCertificatesCommand(input, context); + } + + private deserialize( + output: __HttpResponse, + context: __SerdeContext + ): Promise { + return deserializeAws_restJson1ListTrustStoreCertificatesCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/ListTrustStoresCommand.ts b/clients/client-workspaces-web/src/commands/ListTrustStoresCommand.ts new file mode 100644 index 000000000000..f1d59c74eeff --- /dev/null +++ b/clients/client-workspaces-web/src/commands/ListTrustStoresCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + 
+import { ListTrustStoresRequest, ListTrustStoresResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListTrustStoresCommand, + serializeAws_restJson1ListTrustStoresCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface ListTrustStoresCommandInput extends ListTrustStoresRequest {} +export interface ListTrustStoresCommandOutput extends ListTrustStoresResponse, __MetadataBearer {} + +/** + *

<p>Retrieves a list of trust stores.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, ListTrustStoresCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, ListTrustStoresCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new ListTrustStoresCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListTrustStoresCommandInput} for command's `input` shape. + * @see {@link ListTrustStoresCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class ListTrustStoresCommand extends $Command< + ListTrustStoresCommandInput, + ListTrustStoresCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListTrustStoresCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "ListTrustStoresCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListTrustStoresRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListTrustStoresResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListTrustStoresCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListTrustStoresCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListTrustStoresCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/ListUserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/ListUserSettingsCommand.ts new file mode 100644 index 000000000000..92abca313ec4 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/ListUserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { ListUserSettingsRequest, ListUserSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1ListUserSettingsCommand, + 
serializeAws_restJson1ListUserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface ListUserSettingsCommandInput extends ListUserSettingsRequest {} +export interface ListUserSettingsCommandOutput extends ListUserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Retrieves a list of user settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, ListUserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, ListUserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new ListUserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link ListUserSettingsCommandInput} for command's `input` shape. + * @see {@link ListUserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class ListUserSettingsCommand extends $Command< + ListUserSettingsCommandInput, + ListUserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: ListUserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "ListUserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: ListUserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: ListUserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: ListUserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1ListUserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1ListUserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/TagResourceCommand.ts b/clients/client-workspaces-web/src/commands/TagResourceCommand.ts new file mode 100644 index 000000000000..3f2aa721fe00 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/TagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { TagResourceRequest, TagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1TagResourceCommand, + 
serializeAws_restJson1TagResourceCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface TagResourceCommandInput extends TagResourceRequest {} +export interface TagResourceCommandOutput extends TagResourceResponse, __MetadataBearer {} + +/** + *

<p>Adds or overwrites one or more tags for the specified resource.</p>
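A hedged sketch of tagging a resource, assuming the request takes a `resourceArn` and a `tags` list of `{ Key, Value }` pairs (assumed shape; confirm against `TagResourceRequest` in `models_0`):

```javascript
import { WorkSpacesWebClient, TagResourceCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

// `resourceArn` and the `{ Key, Value }` tag shape are assumptions made for
// this sketch, not confirmed by the diff.
await client.send(
  new TagResourceCommand({
    resourceArn: "arn:aws:workspaces-web:...",
    tags: [{ Key: "team", Value: "platform" }],
  })
);
```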

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, TagResourceCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, TagResourceCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new TagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link TagResourceCommandInput} for command's `input` shape. + * @see {@link TagResourceCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class TagResourceCommand extends $Command< + TagResourceCommandInput, + TagResourceCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: TagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "TagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: TagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: TagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: TagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1TagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1TagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/UntagResourceCommand.ts b/clients/client-workspaces-web/src/commands/UntagResourceCommand.ts new file mode 100644 index 000000000000..c3f78354457d --- /dev/null +++ b/clients/client-workspaces-web/src/commands/UntagResourceCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UntagResourceRequest, UntagResourceResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UntagResourceCommand, + serializeAws_restJson1UntagResourceCommand, +} from "../protocols/Aws_restJson1"; +import { 
ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface UntagResourceCommandInput extends UntagResourceRequest {} +export interface UntagResourceCommandOutput extends UntagResourceResponse, __MetadataBearer {} + +/** + *

<p>Removes one or more tags from the specified resource.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, UntagResourceCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, UntagResourceCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new UntagResourceCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UntagResourceCommandInput} for command's `input` shape. + * @see {@link UntagResourceCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class UntagResourceCommand extends $Command< + UntagResourceCommandInput, + UntagResourceCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UntagResourceCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "UntagResourceCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UntagResourceRequest.filterSensitiveLog, + outputFilterSensitiveLog: UntagResourceResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UntagResourceCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UntagResourceCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UntagResourceCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/UpdateBrowserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/UpdateBrowserSettingsCommand.ts new file mode 100644 index 000000000000..64739177ed03 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/UpdateBrowserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UpdateBrowserSettingsRequest, UpdateBrowserSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateBrowserSettingsCommand, + 
serializeAws_restJson1UpdateBrowserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface UpdateBrowserSettingsCommandInput extends UpdateBrowserSettingsRequest {} +export interface UpdateBrowserSettingsCommandOutput extends UpdateBrowserSettingsResponse, __MetadataBearer {} + +/** + *

<p>Updates browser settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, UpdateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, UpdateBrowserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new UpdateBrowserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateBrowserSettingsCommandInput} for command's `input` shape. + * @see {@link UpdateBrowserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class UpdateBrowserSettingsCommand extends $Command< + UpdateBrowserSettingsCommandInput, + UpdateBrowserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateBrowserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "UpdateBrowserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateBrowserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateBrowserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateBrowserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateBrowserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateBrowserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/UpdateIdentityProviderCommand.ts b/clients/client-workspaces-web/src/commands/UpdateIdentityProviderCommand.ts new file mode 100644 index 000000000000..7b1c9cc2fb2a --- /dev/null +++ b/clients/client-workspaces-web/src/commands/UpdateIdentityProviderCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UpdateIdentityProviderRequest, 
UpdateIdentityProviderResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateIdentityProviderCommand, + serializeAws_restJson1UpdateIdentityProviderCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface UpdateIdentityProviderCommandInput extends UpdateIdentityProviderRequest {} +export interface UpdateIdentityProviderCommandOutput extends UpdateIdentityProviderResponse, __MetadataBearer {} + +/** + *

<p>Updates the identity provider.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, UpdateIdentityProviderCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, UpdateIdentityProviderCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new UpdateIdentityProviderCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateIdentityProviderCommandInput} for command's `input` shape. + * @see {@link UpdateIdentityProviderCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class UpdateIdentityProviderCommand extends $Command< + UpdateIdentityProviderCommandInput, + UpdateIdentityProviderCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateIdentityProviderCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "UpdateIdentityProviderCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateIdentityProviderRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateIdentityProviderResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateIdentityProviderCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateIdentityProviderCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateIdentityProviderCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/UpdateNetworkSettingsCommand.ts b/clients/client-workspaces-web/src/commands/UpdateNetworkSettingsCommand.ts new file mode 100644 index 000000000000..1e32edd23103 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/UpdateNetworkSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UpdateNetworkSettingsRequest, 
UpdateNetworkSettingsResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateNetworkSettingsCommand, + serializeAws_restJson1UpdateNetworkSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface UpdateNetworkSettingsCommandInput extends UpdateNetworkSettingsRequest {} +export interface UpdateNetworkSettingsCommandOutput extends UpdateNetworkSettingsResponse, __MetadataBearer {} + +/** + *

<p>Updates network settings.</p>

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, UpdateNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, UpdateNetworkSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new UpdateNetworkSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateNetworkSettingsCommandInput} for command's `input` shape. + * @see {@link UpdateNetworkSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class UpdateNetworkSettingsCommand extends $Command< + UpdateNetworkSettingsCommandInput, + UpdateNetworkSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateNetworkSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "UpdateNetworkSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateNetworkSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateNetworkSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateNetworkSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateNetworkSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateNetworkSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/UpdatePortalCommand.ts b/clients/client-workspaces-web/src/commands/UpdatePortalCommand.ts new file mode 100644 index 000000000000..71685eb73924 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/UpdatePortalCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UpdatePortalRequest, UpdatePortalResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1UpdatePortalCommand, + serializeAws_restJson1UpdatePortalCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface UpdatePortalCommandInput extends UpdatePortalRequest {} +export interface UpdatePortalCommandOutput extends UpdatePortalResponse, __MetadataBearer {} + +/** + *

<p>Updates a web portal.</p>
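A sketch of an update call with basic error handling, assuming `portalArn` and `displayName` request fields and a `portal` response field (all assumed names); the other Update* commands in this client follow the same ARN-plus-changed-fields shape:

```javascript
import { WorkSpacesWebClient, UpdatePortalCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

try {
  // `portalArn`, `displayName`, and `portal` are assumed field names.
  const { portal } = await client.send(
    new UpdatePortalCommand({
      portalArn: "arn:aws:workspaces-web:...",
      displayName: "Contractor browser",
    })
  );
  console.log(portal);
} catch (error) {
  // SDK v3 service exceptions expose a name and HTTP metadata.
  console.error(error.name, error.$metadata?.httpStatusCode);
  throw error;
}
```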

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, UpdatePortalCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, UpdatePortalCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new UpdatePortalCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdatePortalCommandInput} for command's `input` shape. + * @see {@link UpdatePortalCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class UpdatePortalCommand extends $Command< + UpdatePortalCommandInput, + UpdatePortalCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdatePortalCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "UpdatePortalCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdatePortalRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdatePortalResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdatePortalCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdatePortalCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdatePortalCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/UpdateTrustStoreCommand.ts b/clients/client-workspaces-web/src/commands/UpdateTrustStoreCommand.ts new file mode 100644 index 000000000000..ddae17295dd6 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/UpdateTrustStoreCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UpdateTrustStoreRequest, UpdateTrustStoreResponse } from "../models/models_0"; +import { + deserializeAws_restJson1UpdateTrustStoreCommand, + serializeAws_restJson1UpdateTrustStoreCommand, +} from 
"../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface UpdateTrustStoreCommandInput extends UpdateTrustStoreRequest {} +export interface UpdateTrustStoreCommandOutput extends UpdateTrustStoreResponse, __MetadataBearer {} + +/** + *

                                                                      Updates the trust store.

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, UpdateTrustStoreCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, UpdateTrustStoreCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new UpdateTrustStoreCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateTrustStoreCommandInput} for command's `input` shape. + * @see {@link UpdateTrustStoreCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class UpdateTrustStoreCommand extends $Command< + UpdateTrustStoreCommandInput, + UpdateTrustStoreCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateTrustStoreCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "UpdateTrustStoreCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateTrustStoreRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateTrustStoreResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateTrustStoreCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateTrustStoreCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateTrustStoreCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/UpdateUserSettingsCommand.ts b/clients/client-workspaces-web/src/commands/UpdateUserSettingsCommand.ts new file mode 100644 index 000000000000..eb3ac56ba62e --- /dev/null +++ b/clients/client-workspaces-web/src/commands/UpdateUserSettingsCommand.ts @@ -0,0 +1,95 @@ +import { getSerdePlugin } from "@aws-sdk/middleware-serde"; +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { Command as $Command } from "@aws-sdk/smithy-client"; +import { + FinalizeHandlerArguments, + Handler, + HandlerExecutionContext, + HttpHandlerOptions as __HttpHandlerOptions, + MetadataBearer as __MetadataBearer, + MiddlewareStack, + SerdeContext as __SerdeContext, +} from "@aws-sdk/types"; + +import { UpdateUserSettingsRequest, UpdateUserSettingsResponse } from "../models/models_0"; +import { + 
deserializeAws_restJson1UpdateUserSettingsCommand, + serializeAws_restJson1UpdateUserSettingsCommand, +} from "../protocols/Aws_restJson1"; +import { ServiceInputTypes, ServiceOutputTypes, WorkSpacesWebClientResolvedConfig } from "../WorkSpacesWebClient"; + +export interface UpdateUserSettingsCommandInput extends UpdateUserSettingsRequest {} +export interface UpdateUserSettingsCommandOutput extends UpdateUserSettingsResponse, __MetadataBearer {} + +/** + *

                                                                      Updates the user settings.

                                                                      + * @example + * Use a bare-bones client and the command you need to make an API call. + * ```javascript + * import { WorkSpacesWebClient, UpdateUserSettingsCommand } from "@aws-sdk/client-workspaces-web"; // ES Modules import + * // const { WorkSpacesWebClient, UpdateUserSettingsCommand } = require("@aws-sdk/client-workspaces-web"); // CommonJS import + * const client = new WorkSpacesWebClient(config); + * const command = new UpdateUserSettingsCommand(input); + * const response = await client.send(command); + * ``` + * + * @see {@link UpdateUserSettingsCommandInput} for command's `input` shape. + * @see {@link UpdateUserSettingsCommandOutput} for command's `response` shape. + * @see {@link WorkSpacesWebClientResolvedConfig | config} for WorkSpacesWebClient's `config` shape. + * + */ +export class UpdateUserSettingsCommand extends $Command< + UpdateUserSettingsCommandInput, + UpdateUserSettingsCommandOutput, + WorkSpacesWebClientResolvedConfig +> { + // Start section: command_properties + // End section: command_properties + + constructor(readonly input: UpdateUserSettingsCommandInput) { + // Start section: command_constructor + super(); + // End section: command_constructor + } + + /** + * @internal + */ + resolveMiddleware( + clientStack: MiddlewareStack, + configuration: WorkSpacesWebClientResolvedConfig, + options?: __HttpHandlerOptions + ): Handler { + this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); + + const stack = clientStack.concat(this.middlewareStack); + + const { logger } = configuration; + const clientName = "WorkSpacesWebClient"; + const commandName = "UpdateUserSettingsCommand"; + const handlerExecutionContext: HandlerExecutionContext = { + logger, + clientName, + commandName, + inputFilterSensitiveLog: UpdateUserSettingsRequest.filterSensitiveLog, + outputFilterSensitiveLog: UpdateUserSettingsResponse.filterSensitiveLog, + }; + const { requestHandler } = configuration; + return stack.resolve( + (request: FinalizeHandlerArguments) => + requestHandler.handle(request.request as __HttpRequest, options || {}), + handlerExecutionContext + ); + } + + private serialize(input: UpdateUserSettingsCommandInput, context: __SerdeContext): Promise<__HttpRequest> { + return serializeAws_restJson1UpdateUserSettingsCommand(input, context); + } + + private deserialize(output: __HttpResponse, context: __SerdeContext): Promise { + return deserializeAws_restJson1UpdateUserSettingsCommand(output, context); + } + + // Start section: command_body_extra + // End section: command_body_extra +} diff --git a/clients/client-workspaces-web/src/commands/index.ts b/clients/client-workspaces-web/src/commands/index.ts new file mode 100644 index 000000000000..128bcd7990b7 --- /dev/null +++ b/clients/client-workspaces-web/src/commands/index.ts @@ -0,0 +1,44 @@ +export * from "./AssociateBrowserSettingsCommand"; +export * from "./AssociateNetworkSettingsCommand"; +export * from "./AssociateTrustStoreCommand"; +export * from "./AssociateUserSettingsCommand"; +export * from "./CreateBrowserSettingsCommand"; +export * from "./CreateIdentityProviderCommand"; +export * from "./CreateNetworkSettingsCommand"; +export * from "./CreatePortalCommand"; +export * from "./CreateTrustStoreCommand"; +export * from "./CreateUserSettingsCommand"; +export * from "./DeleteBrowserSettingsCommand"; +export * from "./DeleteIdentityProviderCommand"; +export * from "./DeleteNetworkSettingsCommand"; +export * from 
"./DeletePortalCommand"; +export * from "./DeleteTrustStoreCommand"; +export * from "./DeleteUserSettingsCommand"; +export * from "./DisassociateBrowserSettingsCommand"; +export * from "./DisassociateNetworkSettingsCommand"; +export * from "./DisassociateTrustStoreCommand"; +export * from "./DisassociateUserSettingsCommand"; +export * from "./GetBrowserSettingsCommand"; +export * from "./GetIdentityProviderCommand"; +export * from "./GetNetworkSettingsCommand"; +export * from "./GetPortalCommand"; +export * from "./GetPortalServiceProviderMetadataCommand"; +export * from "./GetTrustStoreCertificateCommand"; +export * from "./GetTrustStoreCommand"; +export * from "./GetUserSettingsCommand"; +export * from "./ListBrowserSettingsCommand"; +export * from "./ListIdentityProvidersCommand"; +export * from "./ListNetworkSettingsCommand"; +export * from "./ListPortalsCommand"; +export * from "./ListTagsForResourceCommand"; +export * from "./ListTrustStoreCertificatesCommand"; +export * from "./ListTrustStoresCommand"; +export * from "./ListUserSettingsCommand"; +export * from "./TagResourceCommand"; +export * from "./UntagResourceCommand"; +export * from "./UpdateBrowserSettingsCommand"; +export * from "./UpdateIdentityProviderCommand"; +export * from "./UpdateNetworkSettingsCommand"; +export * from "./UpdatePortalCommand"; +export * from "./UpdateTrustStoreCommand"; +export * from "./UpdateUserSettingsCommand"; diff --git a/clients/client-workspaces-web/src/endpoints.ts b/clients/client-workspaces-web/src/endpoints.ts new file mode 100644 index 000000000000..412690e0b58c --- /dev/null +++ b/clients/client-workspaces-web/src/endpoints.ts @@ -0,0 +1,134 @@ +import { getRegionInfo, PartitionHash, RegionHash } from "@aws-sdk/config-resolver"; +import { RegionInfoProvider, RegionInfoProviderOptions } from "@aws-sdk/types"; + +const regionHash: RegionHash = {}; + +const partitionHash: PartitionHash = { + aws: { + regions: [ + "af-south-1", + "ap-east-1", + "ap-northeast-1", + "ap-northeast-2", + "ap-northeast-3", + "ap-south-1", + "ap-southeast-1", + "ap-southeast-2", + "ca-central-1", + "eu-central-1", + "eu-north-1", + "eu-south-1", + "eu-west-1", + "eu-west-2", + "eu-west-3", + "me-south-1", + "sa-east-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ], + regionRegex: "^(us|eu|ap|sa|ca|me|af)\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "workspaces-web.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "workspaces-web-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "workspaces-web-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "workspaces-web.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, + "aws-cn": { + regions: ["cn-north-1", "cn-northwest-1"], + regionRegex: "^cn\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "workspaces-web.{region}.amazonaws.com.cn", + tags: [], + }, + { + hostname: "workspaces-web-fips.{region}.amazonaws.com.cn", + tags: ["fips"], + }, + { + hostname: "workspaces-web-fips.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack", "fips"], + }, + { + hostname: "workspaces-web.{region}.api.amazonwebservices.com.cn", + tags: ["dualstack"], + }, + ], + }, + "aws-iso": { + regions: ["us-iso-east-1", "us-iso-west-1"], + regionRegex: "^us\\-iso\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "workspaces-web.{region}.c2s.ic.gov", + tags: [], + }, + { + hostname: "workspaces-web-fips.{region}.c2s.ic.gov", + tags: ["fips"], + }, + ], + }, + "aws-iso-b": { + regions: ["us-isob-east-1"], + regionRegex: 
"^us\\-isob\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "workspaces-web.{region}.sc2s.sgov.gov", + tags: [], + }, + { + hostname: "workspaces-web-fips.{region}.sc2s.sgov.gov", + tags: ["fips"], + }, + ], + }, + "aws-us-gov": { + regions: ["us-gov-east-1", "us-gov-west-1"], + regionRegex: "^us\\-gov\\-\\w+\\-\\d+$", + variants: [ + { + hostname: "workspaces-web.{region}.amazonaws.com", + tags: [], + }, + { + hostname: "workspaces-web-fips.{region}.amazonaws.com", + tags: ["fips"], + }, + { + hostname: "workspaces-web-fips.{region}.api.aws", + tags: ["dualstack", "fips"], + }, + { + hostname: "workspaces-web.{region}.api.aws", + tags: ["dualstack"], + }, + ], + }, +}; + +export const defaultRegionInfoProvider: RegionInfoProvider = async ( + region: string, + options?: RegionInfoProviderOptions +) => + getRegionInfo(region, { + ...options, + signingService: "workspaces-web", + regionHash, + partitionHash, + }); diff --git a/clients/client-workspaces-web/src/index.ts b/clients/client-workspaces-web/src/index.ts new file mode 100644 index 000000000000..bd8a3f9dd460 --- /dev/null +++ b/clients/client-workspaces-web/src/index.ts @@ -0,0 +1,5 @@ +export * from "./WorkSpacesWeb"; +export * from "./WorkSpacesWebClient"; +export * from "./commands"; +export * from "./models"; +export * from "./pagination"; diff --git a/clients/client-workspaces-web/src/models/index.ts b/clients/client-workspaces-web/src/models/index.ts new file mode 100644 index 000000000000..09c5d6e09b8c --- /dev/null +++ b/clients/client-workspaces-web/src/models/index.ts @@ -0,0 +1 @@ +export * from "./models_0"; diff --git a/clients/client-workspaces-web/src/models/models_0.ts b/clients/client-workspaces-web/src/models/models_0.ts new file mode 100644 index 000000000000..46ff5f4e2b6a --- /dev/null +++ b/clients/client-workspaces-web/src/models/models_0.ts @@ -0,0 +1,2954 @@ +import { MetadataBearer as $MetadataBearer, SmithyException as __SmithyException } from "@aws-sdk/types"; + +/** + *

                                                                      Access is denied.

                                                                      + */ +export interface AccessDeniedException extends __SmithyException, $MetadataBearer { + name: "AccessDeniedException"; + $fault: "client"; + message?: string; +} + +export namespace AccessDeniedException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AccessDeniedException): any => ({ + ...obj, + }); +} + +export interface AssociateBrowserSettingsRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The ARN of the browser settings.

                                                                      + */ + browserSettingsArn: string | undefined; +} + +export namespace AssociateBrowserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateBrowserSettingsRequest): any => ({ + ...obj, + }); +} + +export interface AssociateBrowserSettingsResponse { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The ARN of the browser settings.

                                                                      + */ + browserSettingsArn: string | undefined; +} + +export namespace AssociateBrowserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateBrowserSettingsResponse): any => ({ + ...obj, + }); +} + +/** + *

                                                                      There is a conflict.

                                                                      + */ +export interface ConflictException extends __SmithyException, $MetadataBearer { + name: "ConflictException"; + $fault: "client"; + message?: string; + /** + *

                                                                      Identifier of the resource affected.

                                                                      + */ + resourceId?: string; + + /** + *

                                                                      Type of the resource affected.

                                                                      + */ + resourceType?: string; +} + +export namespace ConflictException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ConflictException): any => ({ + ...obj, + }); +} + +/** + *

                                                                      There is an internal server error.

                                                                      + */ +export interface InternalServerException extends __SmithyException, $MetadataBearer { + name: "InternalServerException"; + $fault: "server"; + message?: string; + /** + *

                                                                      Advice to clients on when the call can be safely retried.

                                                                      + */ + retryAfterSeconds?: number; +} + +export namespace InternalServerException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: InternalServerException): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The resource cannot be found.

                                                                      + */ +export interface ResourceNotFoundException extends __SmithyException, $MetadataBearer { + name: "ResourceNotFoundException"; + $fault: "client"; + message?: string; + /** + *

                                                                      Hypothetical identifier of the resource affected.

                                                                      + */ + resourceId?: string; + + /** + *

                                                                      Hypothetical type of the resource affected.

                                                                      + */ + resourceType?: string; +} + +export namespace ResourceNotFoundException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ResourceNotFoundException): any => ({ + ...obj, + }); +} + +/** + *

                                                                      There is a throttling error.

                                                                      + */ +export interface ThrottlingException extends __SmithyException, $MetadataBearer { + name: "ThrottlingException"; + $fault: "client"; + message?: string; + /** + *

                                                                      The originating service.

                                                                      + */ + serviceCode?: string; + + /** + *

                                                                      The originating quota.

                                                                      + */ + quotaCode?: string; + + /** + *

                                                                      Advice to clients on when the call can be safely retried.

                                                                      + */ + retryAfterSeconds?: number; +} + +export namespace ThrottlingException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ThrottlingException): any => ({ + ...obj, + }); +} + +/** + *

                                                                      Information about a field passed inside a request that resulted in an exception.

                                                                      + */ +export interface ValidationExceptionField { + /** + *

                                                                      The name of the field that failed validation.

                                                                      + */ + name: string | undefined; + + /** + *

                                                                      The message describing why the field failed validation.

                                                                      + */ + message: string | undefined; +} + +export namespace ValidationExceptionField { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationExceptionField): any => ({ + ...obj, + }); +} + +export enum ValidationExceptionReason { + CANNOT_PARSE = "cannotParse", + FIELD_VALIDATION_FAILED = "fieldValidationFailed", + OTHER = "other", + UNKNOWN_OPERATION = "unknownOperation", +} + +/** + *

                                                                      There is a validation error.

                                                                      + */ +export interface ValidationException extends __SmithyException, $MetadataBearer { + name: "ValidationException"; + $fault: "client"; + message?: string; + /** + *

The reason the request failed validation.

                                                                      + */ + reason?: ValidationExceptionReason | string; + + /** + *

                                                                      The field that caused the error.

                                                                      + */ + fieldList?: ValidationExceptionField[]; +} + +export namespace ValidationException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ValidationException): any => ({ + ...obj, + }); +} + +export interface AssociateNetworkSettingsRequest { + /** + *
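The exception shapes defined above (AccessDeniedException, ConflictException, InternalServerException, ResourceNotFoundException, ThrottlingException, ValidationException) surface from the client as thrown errors whose name matches the modeled shape. A minimal, hypothetical sketch of inspecting them around a single call follows; the portal ARN is a placeholder and the retry handling is intentionally simplistic.

```typescript
import { WorkSpacesWebClient, GetPortalCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

try {
  // Placeholder ARN; substitute a real portal ARN.
  await client.send(
    new GetPortalCommand({ portalArn: "arn:aws:workspaces-web:us-east-1:111122223333:portal/example" })
  );
} catch (err) {
  const error = err as {
    name?: string;
    retryAfterSeconds?: number;
    fieldList?: { name: string; message: string }[];
  };
  if (error.name === "ThrottlingException" || error.name === "InternalServerException") {
    // retryAfterSeconds advises when the call can safely be retried.
    console.log(`Retry after ${error.retryAfterSeconds ?? 1} second(s)`);
  } else if (error.name === "ValidationException") {
    // fieldList identifies which request fields failed validation and why.
    for (const field of error.fieldList ?? []) {
      console.log(`${field.name}: ${field.message}`);
    }
  } else {
    throw err;
  }
}
```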

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The ARN of the network settings.

                                                                      + */ + networkSettingsArn: string | undefined; +} + +export namespace AssociateNetworkSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateNetworkSettingsRequest): any => ({ + ...obj, + }); +} + +export interface AssociateNetworkSettingsResponse { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The ARN of the network settings.

                                                                      + */ + networkSettingsArn: string | undefined; +} + +export namespace AssociateNetworkSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateNetworkSettingsResponse): any => ({ + ...obj, + }); +} + +export interface AssociateTrustStoreRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The ARN of the trust store.

                                                                      + */ + trustStoreArn: string | undefined; +} + +export namespace AssociateTrustStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateTrustStoreRequest): any => ({ + ...obj, + }); +} + +export interface AssociateTrustStoreResponse { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The ARN of the trust store.

                                                                      + */ + trustStoreArn: string | undefined; +} + +export namespace AssociateTrustStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateTrustStoreResponse): any => ({ + ...obj, + }); +} + +export interface AssociateUserSettingsRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The ARN of the user settings.

                                                                      + */ + userSettingsArn: string | undefined; +} + +export namespace AssociateUserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateUserSettingsRequest): any => ({ + ...obj, + }); +} + +export interface AssociateUserSettingsResponse { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The ARN of the user settings.

                                                                      + */ + userSettingsArn: string | undefined; +} + +export namespace AssociateUserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: AssociateUserSettingsResponse): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The tag.

                                                                      + */ +export interface Tag { + /** + *

                                                                      The key of the tag.

                                                                      + */ + Key: string | undefined; + + /** + *

The value of the tag.

                                                                      + */ + Value: string | undefined; +} + +export namespace Tag { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Tag): any => ({ + ...obj, + }); +} + +export interface CreateBrowserSettingsRequest { + /** + *

                                                                      The tags to add to the browser settings resource. A tag is a key-value pair.

                                                                      + */ + tags?: Tag[]; + + /** + *

The customer managed key of the browser settings.

                                                                      + */ + customerManagedKey?: string; + + /** + *

                                                                      Additional encryption context of the browser settings.

                                                                      + */ + additionalEncryptionContext?: { [key: string]: string }; + + /** + *

A JSON string containing Chrome Enterprise policies that will be applied to all streaming sessions.

                                                                      + */ + browserPolicy: string | undefined; + + /** + *

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      + *

                                                                      If you do not specify a client token, one is automatically generated by the AWS SDK.

                                                                      + */ + clientToken?: string; +} + +export namespace CreateBrowserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateBrowserSettingsRequest): any => ({ + ...obj, + }); +} + +export interface CreateBrowserSettingsResponse { + /** + *
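As a worked example of the browserPolicy and clientToken fields described above, here is a hypothetical sketch that creates a browser settings resource from a Chrome Enterprise policy document. The policy content and tag values are placeholders, not a recommended configuration.

```typescript
import { randomUUID } from "crypto";
import { WorkSpacesWebClient, CreateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

// Illustrative Chrome Enterprise policy document applied to all streaming sessions.
const browserPolicy = JSON.stringify({
  chromePolicies: {
    BookmarkBarEnabled: { value: true },
  },
});

const { browserSettingsArn } = await client.send(
  new CreateBrowserSettingsCommand({
    browserPolicy,
    // Supplying an explicit client token makes retries of this request idempotent;
    // when omitted, the SDK generates one automatically.
    clientToken: randomUUID(),
    tags: [{ Key: "team", Value: "it-security" }],
  })
);
console.log(browserSettingsArn);
```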

                                                                      The ARN of the browser settings.

                                                                      + */ + browserSettingsArn: string | undefined; +} + +export namespace CreateBrowserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateBrowserSettingsResponse): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The service quota has been exceeded.

                                                                      + */ +export interface ServiceQuotaExceededException extends __SmithyException, $MetadataBearer { + name: "ServiceQuotaExceededException"; + $fault: "client"; + message?: string; + /** + *

                                                                      Identifier of the resource affected.

                                                                      + */ + resourceId?: string; + + /** + *

                                                                      Type of the resource affected.

                                                                      + */ + resourceType?: string; + + /** + *

                                                                      The originating service.

                                                                      + */ + serviceCode?: string; + + /** + *

                                                                      The originating quota.

                                                                      + */ + quotaCode?: string; +} + +export namespace ServiceQuotaExceededException { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ServiceQuotaExceededException): any => ({ + ...obj, + }); +} + +export enum IdentityProviderType { + Facebook = "Facebook", + Google = "Google", + LoginWithAmazon = "LoginWithAmazon", + OIDC = "OIDC", + SAML = "SAML", + SignInWithApple = "SignInWithApple", +} + +export interface CreateIdentityProviderRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The identity provider name.

                                                                      + */ + identityProviderName: string | undefined; + + /** + *

                                                                      The identity provider type.

                                                                      + */ + identityProviderType: IdentityProviderType | string | undefined; + + /** + *

The identity provider details. The following list describes the provider detail keys for each identity provider type (a request sketch follows the identityProviderDetails field below).

- For Google and Login with Amazon: client_id, client_secret, authorize_scopes
- For Facebook: client_id, client_secret, authorize_scopes, api_version
- For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
- For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, plus authorize_url, token_url, attributes_url, and jwks_uri when they are not available from the discovery URL specified by the oidc_issuer key
- For SAML providers: MetadataFile OR MetadataURL, and optionally IDPSignout
                                                                      + */ + identityProviderDetails: { [key: string]: string } | undefined; + + /** + *
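A hypothetical request sketch for the identityProviderDetails map, using the OIDC key set from the list above. Every value is a placeholder, and the optional *_url keys are omitted on the assumption that they can be discovered from the issuer.

```typescript
import {
  WorkSpacesWebClient,
  CreateIdentityProviderCommand,
  IdentityProviderType,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

const { identityProviderArn } = await client.send(
  new CreateIdentityProviderCommand({
    portalArn: "arn:aws:workspaces-web:us-east-1:111122223333:portal/example",
    identityProviderName: "MyOidcIdp",
    identityProviderType: IdentityProviderType.OIDC,
    identityProviderDetails: {
      client_id: "example-client-id",
      client_secret: "example-client-secret",
      attributes_request_method: "GET",
      oidc_issuer: "https://idp.example.com",
      authorize_scopes: "openid email",
      // authorize_url, token_url, attributes_url, and jwks_uri are only needed
      // when they cannot be discovered from the oidc_issuer metadata.
    },
  })
);
console.log(identityProviderArn);
```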

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      + *

If you do not specify a client token, one is automatically generated by the AWS SDK.

                                                                      + */ + clientToken?: string; +} + +export namespace CreateIdentityProviderRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateIdentityProviderRequest): any => ({ + ...obj, + }); +} + +export interface CreateIdentityProviderResponse { + /** + *

                                                                      The ARN of the identity provider.

                                                                      + */ + identityProviderArn: string | undefined; +} + +export namespace CreateIdentityProviderResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateIdentityProviderResponse): any => ({ + ...obj, + }); +} + +export interface CreateNetworkSettingsRequest { + /** + *

                                                                      The VPC that streaming instances will connect to.

                                                                      + */ + vpcId: string | undefined; + + /** + *

                                                                      The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different availability zones.

                                                                      + */ + subnetIds: string[] | undefined; + + /** + *

                                                                      One or more security groups used to control access from streaming instances to your VPC.

                                                                      + */ + securityGroupIds: string[] | undefined; + + /** + *

                                                                      The tags to add to the network settings resource. A tag is a key-value pair.

                                                                      + */ + tags?: Tag[]; + + /** + *

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      + *

If you do not specify a client token, one is automatically generated by the AWS SDK.

                                                                      + */ + clientToken?: string; +} + +export namespace CreateNetworkSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateNetworkSettingsRequest): any => ({ + ...obj, + }); +} + +export interface CreateNetworkSettingsResponse { + /** + *
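A minimal sketch of creating network settings with the fields just described. The VPC, subnet, and security group IDs are placeholders, and the two subnets are assumed to sit in different Availability Zones, as the subnetIds documentation requires.

```typescript
import { WorkSpacesWebClient, CreateNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

const { networkSettingsArn } = await client.send(
  new CreateNetworkSettingsCommand({
    vpcId: "vpc-0123456789abcdef0",
    // Placeholder subnets, assumed to be in two different Availability Zones.
    subnetIds: ["subnet-aaaa1111", "subnet-bbbb2222"],
    securityGroupIds: ["sg-0123456789abcdef0"],
  })
);
console.log(networkSettingsArn);
```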

                                                                      The ARN of the network settings.

                                                                      + */ + networkSettingsArn: string | undefined; +} + +export namespace CreateNetworkSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateNetworkSettingsResponse): any => ({ + ...obj, + }); +} + +export interface CreatePortalRequest { + /** + *

                                                                      The name of the web portal. This is not visible to users who log into the web portal.

                                                                      + */ + displayName?: string; + + /** + *

                                                                      The tags to add to the web portal. A tag is a key-value pair.

                                                                      + */ + tags?: Tag[]; + + /** + *

                                                                      The customer managed key of the web portal.

                                                                      + */ + customerManagedKey?: string; + + /** + *

                                                                      The additional encryption context of the portal.

                                                                      + */ + additionalEncryptionContext?: { [key: string]: string }; + + /** + *

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      + *

If you do not specify a client token, one is automatically generated by the AWS SDK.

                                                                      + */ + clientToken?: string; +} + +export namespace CreatePortalRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreatePortalRequest): any => ({ + ...obj, + }); +} + +export interface CreatePortalResponse { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The endpoint URL of the web portal that users access in order to start streaming sessions.

                                                                      + */ + portalEndpoint: string | undefined; +} + +export namespace CreatePortalResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreatePortalResponse): any => ({ + ...obj, + }); +} + +export interface CreateTrustStoreRequest { + /** + *
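The request and response shapes above imply a typical wiring flow: create the portal, attach previously created settings resources with the Associate* operations, and hand the portalEndpoint to end users. A hedged sketch of that flow, with placeholder ARNs:

```typescript
import {
  WorkSpacesWebClient,
  CreatePortalCommand,
  AssociateNetworkSettingsCommand,
  AssociateBrowserSettingsCommand,
  AssociateUserSettingsCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

// 1. Create the portal shell.
const { portalArn, portalEndpoint } = await client.send(
  new CreatePortalCommand({ displayName: "engineering-portal" })
);

// 2. Attach previously created settings resources (placeholder ARNs).
await client.send(
  new AssociateNetworkSettingsCommand({
    portalArn: portalArn!,
    networkSettingsArn: "arn:aws:workspaces-web:us-east-1:111122223333:networkSettings/example",
  })
);
await client.send(
  new AssociateBrowserSettingsCommand({
    portalArn: portalArn!,
    browserSettingsArn: "arn:aws:workspaces-web:us-east-1:111122223333:browserSettings/example",
  })
);
await client.send(
  new AssociateUserSettingsCommand({
    portalArn: portalArn!,
    userSettingsArn: "arn:aws:workspaces-web:us-east-1:111122223333:userSettings/example",
  })
);

// 3. Users sign in at the portal endpoint to start streaming sessions.
console.log(portalEndpoint);
```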

                                                                      A list of CA certificates to be added to the trust store.

                                                                      + */ + certificateList: Uint8Array[] | undefined; + + /** + *

                                                                      The tags to add to the trust store. A tag is a key-value pair.

                                                                      + */ + tags?: Tag[]; + + /** + *

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      + *

If you do not specify a client token, one is automatically generated by the AWS SDK.

                                                                      + */ + clientToken?: string; +} + +export namespace CreateTrustStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateTrustStoreRequest): any => ({ + ...obj, + }); +} + +export interface CreateTrustStoreResponse { + /** + *
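Because certificateList is modeled as Uint8Array[], certificate bodies can be passed straight from a file read. A hypothetical sketch (the PEM path is a placeholder):

```typescript
import { readFileSync } from "fs";
import { WorkSpacesWebClient, CreateTrustStoreCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

// Placeholder path to an internal root CA certificate.
const caCertificate = new Uint8Array(readFileSync("./internal-root-ca.pem"));

const { trustStoreArn } = await client.send(
  new CreateTrustStoreCommand({ certificateList: [caCertificate] })
);
console.log(trustStoreArn);
```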

                                                                      The ARN of the trust store.

                                                                      + */ + trustStoreArn: string | undefined; +} + +export namespace CreateTrustStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateTrustStoreResponse): any => ({ + ...obj, + }); +} + +export enum EnabledType { + DISABLED = "Disabled", + ENABLED = "Enabled", +} + +export interface CreateUserSettingsRequest { + /** + *

Specifies whether the user can copy text from the streaming session to the local device.

                                                                      + */ + copyAllowed: EnabledType | string | undefined; + + /** + *

Specifies whether the user can paste text from the local device to the streaming session.

                                                                      + */ + pasteAllowed: EnabledType | string | undefined; + + /** + *

Specifies whether the user can download files from the streaming session to the local device.

                                                                      + */ + downloadAllowed: EnabledType | string | undefined; + + /** + *

Specifies whether the user can upload files from the local device to the streaming session.

                                                                      + */ + uploadAllowed: EnabledType | string | undefined; + + /** + *

                                                                      Specifies whether the user can print to the local device.

                                                                      + */ + printAllowed: EnabledType | string | undefined; + + /** + *

                                                                      The tags to add to the user settings resource. A tag is a key-value pair.

                                                                      + */ + tags?: Tag[]; + + /** + *

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      + *

If you do not specify a client token, one is automatically generated by the AWS SDK.

                                                                      + */ + clientToken?: string; +} + +export namespace CreateUserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateUserSettingsRequest): any => ({ + ...obj, + }); +} + +export interface CreateUserSettingsResponse { + /** + *
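A short sketch of the EnabledType toggles described above, locking down clipboard and file transfer while leaving printing enabled; the chosen values are illustrative policy choices, not recommendations.

```typescript
import { WorkSpacesWebClient, CreateUserSettingsCommand, EnabledType } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });

const { userSettingsArn } = await client.send(
  new CreateUserSettingsCommand({
    // Clipboard and file transfer locked down; printing left on.
    copyAllowed: EnabledType.DISABLED,
    pasteAllowed: EnabledType.DISABLED,
    downloadAllowed: EnabledType.DISABLED,
    uploadAllowed: EnabledType.DISABLED,
    printAllowed: EnabledType.ENABLED,
  })
);
console.log(userSettingsArn);
```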

                                                                      The ARN of the user settings.

                                                                      + */ + userSettingsArn: string | undefined; +} + +export namespace CreateUserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CreateUserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface DeleteBrowserSettingsRequest { + /** + *

                                                                      The ARN of the browser settings.

                                                                      + */ + browserSettingsArn: string | undefined; +} + +export namespace DeleteBrowserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteBrowserSettingsRequest): any => ({ + ...obj, + }); +} + +export interface DeleteBrowserSettingsResponse {} + +export namespace DeleteBrowserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteBrowserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface DeleteIdentityProviderRequest { + /** + *

                                                                      The ARN of the identity provider.

                                                                      + */ + identityProviderArn: string | undefined; +} + +export namespace DeleteIdentityProviderRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteIdentityProviderRequest): any => ({ + ...obj, + }); +} + +export interface DeleteIdentityProviderResponse {} + +export namespace DeleteIdentityProviderResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteIdentityProviderResponse): any => ({ + ...obj, + }); +} + +export interface DeleteNetworkSettingsRequest { + /** + *

                                                                      The ARN of the network settings.

                                                                      + */ + networkSettingsArn: string | undefined; +} + +export namespace DeleteNetworkSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteNetworkSettingsRequest): any => ({ + ...obj, + }); +} + +export interface DeleteNetworkSettingsResponse {} + +export namespace DeleteNetworkSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteNetworkSettingsResponse): any => ({ + ...obj, + }); +} + +export interface DeletePortalRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; +} + +export namespace DeletePortalRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePortalRequest): any => ({ + ...obj, + }); +} + +export interface DeletePortalResponse {} + +export namespace DeletePortalResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeletePortalResponse): any => ({ + ...obj, + }); +} + +export interface DeleteTrustStoreRequest { + /** + *

                                                                      The ARN of the trust store.

                                                                      + */ + trustStoreArn: string | undefined; +} + +export namespace DeleteTrustStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteTrustStoreRequest): any => ({ + ...obj, + }); +} + +export interface DeleteTrustStoreResponse {} + +export namespace DeleteTrustStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteTrustStoreResponse): any => ({ + ...obj, + }); +} + +export interface DeleteUserSettingsRequest { + /** + *

                                                                      The ARN of the user settings.

                                                                      + */ + userSettingsArn: string | undefined; +} + +export namespace DeleteUserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteUserSettingsRequest): any => ({ + ...obj, + }); +} + +export interface DeleteUserSettingsResponse {} + +export namespace DeleteUserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DeleteUserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface DisassociateBrowserSettingsRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; +} + +export namespace DisassociateBrowserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateBrowserSettingsRequest): any => ({ + ...obj, + }); +} + +export interface DisassociateBrowserSettingsResponse {} + +export namespace DisassociateBrowserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateBrowserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface DisassociateNetworkSettingsRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; +} + +export namespace DisassociateNetworkSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateNetworkSettingsRequest): any => ({ + ...obj, + }); +} + +export interface DisassociateNetworkSettingsResponse {} + +export namespace DisassociateNetworkSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateNetworkSettingsResponse): any => ({ + ...obj, + }); +} + +export interface DisassociateTrustStoreRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; +} + +export namespace DisassociateTrustStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateTrustStoreRequest): any => ({ + ...obj, + }); +} + +export interface DisassociateTrustStoreResponse {} + +export namespace DisassociateTrustStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateTrustStoreResponse): any => ({ + ...obj, + }); +} + +export interface DisassociateUserSettingsRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; +} + +export namespace DisassociateUserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateUserSettingsRequest): any => ({ + ...obj, + }); +} + +export interface DisassociateUserSettingsResponse {} + +export namespace DisassociateUserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: DisassociateUserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface GetBrowserSettingsRequest { + /** + *

                                                                      The ARN of the browser settings.

                                                                      + */ + browserSettingsArn: string | undefined; +} + +export namespace GetBrowserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetBrowserSettingsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The browser settings resource that can be associated with a web portal. Once associated + * with a web portal, browser settings control how the browser will behave once a user starts + * a streaming session for the web portal.

                                                                      + */ +export interface BrowserSettings { + /** + *

                                                                      The ARN of the browser settings.

                                                                      + */ + browserSettingsArn: string | undefined; + + /** + *

A list of web portal ARNs that these browser settings are associated with.

                                                                      + */ + associatedPortalArns?: string[]; + + /** + *

                                                                      A JSON string containing Chrome Enterprise policies that will be applied to all + * streaming sessions.

                                                                      + */ + browserPolicy?: string; +} + +export namespace BrowserSettings { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BrowserSettings): any => ({ + ...obj, + }); +} + +export interface GetBrowserSettingsResponse { + /** + *
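A minimal sketch of how the browserPolicy field above is meant to be used, assuming the shapes in this file compile as part of the new client package; the Chrome Enterprise policy names and the ARN below are illustrative placeholders, not values taken from this change:

```ts
// Sketch only: browserPolicy is documented above as a JSON *string* of Chrome
// Enterprise policies, so a policy object must be serialized before it is stored.
const examplePolicy = {
  chromePolicies: {
    IncognitoModeAvailability: { value: 1 }, // hypothetical policy payload
  },
};

const browserSettings: BrowserSettings = {
  browserSettingsArn: "arn:aws:workspaces-web:us-east-1:111122223333:browserSettings/example", // placeholder ARN
  associatedPortalArns: [],
  browserPolicy: JSON.stringify(examplePolicy),
};
```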

                                                                      The browser settings.

                                                                      + */ + browserSettings?: BrowserSettings; +} + +export namespace GetBrowserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetBrowserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface GetIdentityProviderRequest { + /** + *

                                                                      The ARN of the identity provider.

                                                                      + */ + identityProviderArn: string | undefined; +} + +export namespace GetIdentityProviderRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetIdentityProviderRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The identity provider.

                                                                      + */ +export interface IdentityProvider { + /** + *

                                                                      The ARN of the identity provider.

                                                                      + */ + identityProviderArn: string | undefined; + + /** + *

                                                                      The identity provider name.

                                                                      + */ + identityProviderName?: string; + + /** + *

                                                                      The identity provider type.

                                                                      + */ + identityProviderType?: IdentityProviderType | string; + + /** + *

                                                                      The identity provider details. The following list describes the provider detail keys for + * each identity provider type.

+ *   - For Google and Login with Amazon: client_id, client_secret, authorize_scopes
+ *   - For Facebook: client_id, client_secret, authorize_scopes, api_version
+ *   - For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
+ *   - For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer,
+ *     authorize_scopes, plus authorize_url, token_url, attributes_url, and jwks_uri when those
+ *     are not available from the discovery URL specified by the oidc_issuer key
+ *   - For SAML providers: MetadataFile OR MetadataURL, and IDPSignout (optional)
                                                                      + */ + identityProviderDetails?: { [key: string]: string }; +} + +export namespace IdentityProvider { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IdentityProvider): any => ({ + ...obj, + }); +} + +export interface GetIdentityProviderResponse { + /** + *
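A hedged illustration of the identityProviderDetails map for an OIDC provider, using only the keys listed above; every concrete value is a placeholder:

```ts
// Sketch only: keys come from the OIDC list above; all values are placeholders.
const oidcDetails: { [key: string]: string } = {
  client_id: "example-client-id",
  client_secret: "example-client-secret",
  attributes_request_method: "GET",
  oidc_issuer: "https://idp.example.com",
  authorize_scopes: "openid email",
  // authorize_url, token_url, attributes_url, and jwks_uri are only needed when the
  // discovery URL specified by oidc_issuer does not provide them.
};

const provider: IdentityProvider = {
  identityProviderArn: "arn:aws:workspaces-web:us-east-1:111122223333:identityProvider/example", // placeholder ARN
  identityProviderName: "example-oidc",
  identityProviderType: "OIDC", // assumed member of IdentityProviderType, declared earlier in this file
  identityProviderDetails: oidcDetails,
};
```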

                                                                      The identity provider.

                                                                      + */ + identityProvider?: IdentityProvider; +} + +export namespace GetIdentityProviderResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetIdentityProviderResponse): any => ({ + ...obj, + }); +} + +export interface GetNetworkSettingsRequest { + /** + *

                                                                      The ARN of the network settings.

                                                                      + */ + networkSettingsArn: string | undefined; +} + +export namespace GetNetworkSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetNetworkSettingsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      A network settings resource that can be associated with a web portal. Once associated + * with a web portal, network settings define how streaming instances will connect with your + * specified VPC.

                                                                      + */ +export interface NetworkSettings { + /** + *

                                                                      The ARN of the network settings.

                                                                      + */ + networkSettingsArn: string | undefined; + + /** + *

A list of web portal ARNs that these network settings are associated with.

                                                                      + */ + associatedPortalArns?: string[]; + + /** + *

                                                                      The VPC that streaming instances will connect to.

                                                                      + */ + vpcId?: string; + + /** + *

                                                                      The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different availability zones.

                                                                      + */ + subnetIds?: string[]; + + /** + *

                                                                      One or more security groups used to control access from streaming instances to your VPC.

                                                                      + */ + securityGroupIds?: string[]; +} + +export namespace NetworkSettings { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NetworkSettings): any => ({ + ...obj, + }); +} + +export interface GetNetworkSettingsResponse { + /** + *
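As a sketch of the constraint called out above (at least two subnets in different Availability Zones), a NetworkSettings-shaped value might look like the following; all identifiers are placeholders:

```ts
// Sketch only: placeholder VPC, subnet, and security group identifiers.
const networkSettings: NetworkSettings = {
  networkSettingsArn: "arn:aws:workspaces-web:us-east-1:111122223333:networkSettings/example", // placeholder ARN
  vpcId: "vpc-0123456789abcdef0",
  // At least two subnets in different Availability Zones, per the doc comment above.
  subnetIds: ["subnet-aaaa1111", "subnet-bbbb2222"],
  securityGroupIds: ["sg-0123456789abcdef0"],
};
```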

                                                                      The network settings.

                                                                      + */ + networkSettings?: NetworkSettings; +} + +export namespace GetNetworkSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetNetworkSettingsResponse): any => ({ + ...obj, + }); +} + +export interface GetPortalRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; +} + +export namespace GetPortalRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPortalRequest): any => ({ + ...obj, + }); +} + +export enum BrowserType { + CHROME = "Chrome", +} + +export enum PortalStatus { + ACTIVE = "Active", + INCOMPLETE = "Incomplete", + PENDING = "Pending", +} + +export enum RendererType { + APPSTREAM = "AppStream", +} + +/** + *

                                                                      The web portal.

                                                                      + */ +export interface Portal { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn?: string; + + /** + *

                                                                      The renderer that is used in streaming sessions.

                                                                      + */ + rendererType?: RendererType | string; + + /** + *

                                                                      The browser that users see when using a streaming session.

                                                                      + */ + browserType?: BrowserType | string; + + /** + *

                                                                      The status of the web portal.

                                                                      + */ + portalStatus?: PortalStatus | string; + + /** + *

                                                                      The endpoint URL of the web portal that users access in order to start streaming + * sessions.

                                                                      + */ + portalEndpoint?: string; + + /** + *

                                                                      The name of the web portal.

                                                                      + */ + displayName?: string; + + /** + *

                                                                      The creation date of the web portal.

                                                                      + */ + creationDate?: Date; + + /** + *

                                                                      The ARN of the browser settings that is associated with this web portal.

                                                                      + */ + browserSettingsArn?: string; + + /** + *

The ARN of the user settings that is associated with the web portal.

                                                                      + */ + userSettingsArn?: string; + + /** + *

                                                                      The ARN of the network settings that is associated with the web portal.

                                                                      + */ + networkSettingsArn?: string; + + /** + *

                                                                      The ARN of the trust store that is associated with the web portal.

                                                                      + */ + trustStoreArn?: string; + + /** + *

                                                                      A message that explains why the web portal is in its current status.

                                                                      + */ + statusReason?: string; +} + +export namespace Portal { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Portal): any => ({ + ...obj, + }); +} + +export interface GetPortalResponse { + /** + *
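A small helper over the Portal fields above; PortalStatus members come from the enum declared just before Portal, so this uses only shapes shown in this excerpt:

```ts
// Sketch: summarize a Portal using only fields declared above.
function describePortal(portal: Portal): string {
  if (portal.portalStatus === PortalStatus.ACTIVE) {
    return `${portal.displayName ?? portal.portalArn}: active at ${portal.portalEndpoint}`;
  }
  // Incomplete/Pending portals carry a statusReason explaining the current status.
  return `${portal.displayName ?? portal.portalArn}: ${portal.portalStatus} (${portal.statusReason ?? "no status reason"})`;
}
```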

                                                                      The web portal.

                                                                      + */ + portal?: Portal; +} + +export namespace GetPortalResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPortalResponse): any => ({ + ...obj, + }); +} + +export interface GetPortalServiceProviderMetadataRequest { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; +} + +export namespace GetPortalServiceProviderMetadataRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPortalServiceProviderMetadataRequest): any => ({ + ...obj, + }); +} + +export interface GetPortalServiceProviderMetadataResponse { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; + + /** + *

                                                                      The service provider SAML metadata.

                                                                      + */ + serviceProviderSamlMetadata?: string; +} + +export namespace GetPortalServiceProviderMetadataResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetPortalServiceProviderMetadataResponse): any => ({ + ...obj, + }); +} + +export interface GetTrustStoreRequest { + /** + *

                                                                      The ARN of the trust store.

                                                                      + */ + trustStoreArn: string | undefined; +} + +export namespace GetTrustStoreRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTrustStoreRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      A trust store that can be associated with a web portal. A trust store contains + * certificate authority (CA) certificates. Once associated with a web portal, the browser in + * a streaming session will recognize certificates that have been issued using any of the CAs + * in the trust store. If your organization has internal websites that use certificates issued + * by private CAs, you should add the private CA certificate to the trust store.

                                                                      + */ +export interface TrustStore { + /** + *

                                                                      A list of web portal ARNs that this trust store is associated with.

                                                                      + */ + associatedPortalArns?: string[]; + + /** + *

                                                                      The ARN of the trust store.

                                                                      + */ + trustStoreArn?: string; +} + +export namespace TrustStore { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TrustStore): any => ({ + ...obj, + }); +} + +export interface GetTrustStoreResponse { + /** + *

                                                                      The trust store.

                                                                      + */ + trustStore?: TrustStore; +} + +export namespace GetTrustStoreResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTrustStoreResponse): any => ({ + ...obj, + }); +} + +export interface GetTrustStoreCertificateRequest { + /** + *

                                                                      The ARN of the trust store certificate.

                                                                      + */ + trustStoreArn: string | undefined; + + /** + *

                                                                      The thumbprint of the trust store certificate.

                                                                      + */ + thumbprint: string | undefined; +} + +export namespace GetTrustStoreCertificateRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTrustStoreCertificateRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The certificate.

                                                                      + */ +export interface Certificate { + /** + *

                                                                      A hexadecimal identifier for the certificate.

                                                                      + */ + thumbprint?: string; + + /** + *

                                                                      The entity the certificate belongs to.

                                                                      + */ + subject?: string; + + /** + *

                                                                      The entity that issued the certificate.

                                                                      + */ + issuer?: string; + + /** + *

                                                                      The certificate is not valid before this date.

                                                                      + */ + notValidBefore?: Date; + + /** + *

                                                                      The certificate is not valid after this date.

                                                                      + */ + notValidAfter?: Date; + + /** + *

                                                                      The body of the certificate.

                                                                      + */ + body?: Uint8Array; +} + +export namespace Certificate { + /** + * @internal + */ + export const filterSensitiveLog = (obj: Certificate): any => ({ + ...obj, + }); +} + +export interface GetTrustStoreCertificateResponse { + /** + *
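A hedged helper over the Certificate shape above; whether body is PEM-encoded text is an assumption, so the decode step is illustrative only:

```ts
// Sketch: check validity dates and (assuming a PEM body) decode it for display.
function summarizeCertificate(cert: Certificate): string {
  const now = new Date();
  const withinValidity =
    (!cert.notValidBefore || cert.notValidBefore <= now) &&
    (!cert.notValidAfter || now <= cert.notValidAfter);
  const pem = cert.body ? new TextDecoder().decode(cert.body) : "<no body returned>";
  return [
    `subject: ${cert.subject ?? "unknown"}`,
    `issuer: ${cert.issuer ?? "unknown"}`,
    `thumbprint: ${cert.thumbprint ?? "unknown"}`,
    `currently valid: ${withinValidity}`,
    pem,
  ].join("\n");
}
```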

                                                                      The ARN of the trust store certificate.

                                                                      + */ + trustStoreArn?: string; + + /** + *

The certificate of the trust store.

                                                                      + */ + certificate?: Certificate; +} + +export namespace GetTrustStoreCertificateResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetTrustStoreCertificateResponse): any => ({ + ...obj, + }); +} + +export interface GetUserSettingsRequest { + /** + *

                                                                      The ARN of the user settings.

                                                                      + */ + userSettingsArn: string | undefined; +} + +export namespace GetUserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUserSettingsRequest): any => ({ + ...obj, + }); +} + +/** + *

A user settings resource that can be associated with a web portal. Once associated with + * a web portal, user settings control how users can transfer data between a streaming session + * and their local devices.

                                                                      + */ +export interface UserSettings { + /** + *

                                                                      The ARN of the user settings.

                                                                      + */ + userSettingsArn: string | undefined; + + /** + *

A list of web portal ARNs that these user settings are associated with.

                                                                      + */ + associatedPortalArns?: string[]; + + /** + *

                                                                      Specifies whether the user can copy text from the streaming session to the local + * device.

                                                                      + */ + copyAllowed?: EnabledType | string; + + /** + *

                                                                      Specifies whether the user can paste text from the local device to the streaming + * session.

                                                                      + */ + pasteAllowed?: EnabledType | string; + + /** + *

                                                                      Specifies whether the user can download files from the streaming session to the local + * device.

                                                                      + */ + downloadAllowed?: EnabledType | string; + + /** + *

                                                                      Specifies whether the user can upload files from the local device to the streaming + * session.

                                                                      + */ + uploadAllowed?: EnabledType | string; + + /** + *

                                                                      Specifies whether the user can print to the local device.

                                                                      + */ + printAllowed?: EnabledType | string; +} + +export namespace UserSettings { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UserSettings): any => ({ + ...obj, + }); +} + +export interface GetUserSettingsResponse { + /** + *
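A hedged example of a UserSettings value wiring up the per-portal data-transfer controls described above; EnabledType is declared earlier in this file, and "Enabled"/"Disabled" are assumed member values, which the `EnabledType | string` union tolerates:

```ts
// Sketch only: the string values are assumed; each field accepts EnabledType | string.
const userSettings: UserSettings = {
  userSettingsArn: "arn:aws:workspaces-web:us-east-1:111122223333:userSettings/example", // placeholder ARN
  copyAllowed: "Enabled",   // copy from the streaming session to the local device
  pasteAllowed: "Enabled",  // paste from the local device into the streaming session
  downloadAllowed: "Disabled",
  uploadAllowed: "Disabled",
  printAllowed: "Disabled",
};
```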

                                                                      The user settings.

                                                                      + */ + userSettings?: UserSettings; +} + +export namespace GetUserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: GetUserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface ListBrowserSettingsRequest { + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; + + /** + *

                                                                      The maximum number of results to be included in the next page.

                                                                      + */ + maxResults?: number; +} + +export namespace ListBrowserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListBrowserSettingsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The summary for browser settings.

                                                                      + */ +export interface BrowserSettingsSummary { + /** + *

                                                                      The ARN of the browser settings.

                                                                      + */ + browserSettingsArn?: string; +} + +export namespace BrowserSettingsSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: BrowserSettingsSummary): any => ({ + ...obj, + }); +} + +export interface ListBrowserSettingsResponse { + /** + *

                                                                      The browser settings.

                                                                      + */ + browserSettings?: BrowserSettingsSummary[]; + + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; +} + +export namespace ListBrowserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListBrowserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface ListIdentityProvidersRequest { + /** + *
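The List* request/response pairs in this file all follow the same nextToken/maxResults contract; a minimal sketch of driving it, where listBrowserSettings stands in for whatever client call wraps the ListBrowserSettings operation (an assumption, since the command itself is not part of this excerpt):

```ts
// Sketch: generic pagination loop over the shapes declared above.
async function collectAllBrowserSettings(
  listBrowserSettings: (input: ListBrowserSettingsRequest) => Promise<ListBrowserSettingsResponse>
): Promise<BrowserSettingsSummary[]> {
  const summaries: BrowserSettingsSummary[] = [];
  let nextToken: string | undefined;
  do {
    const page = await listBrowserSettings({ nextToken, maxResults: 25 });
    summaries.push(...(page.browserSettings ?? []));
    nextToken = page.nextToken; // undefined once the last page has been returned
  } while (nextToken);
  return summaries;
}
```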

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; + + /** + *

                                                                      The maximum number of results to be included in the next page.

                                                                      + */ + maxResults?: number; + + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn: string | undefined; +} + +export namespace ListIdentityProvidersRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListIdentityProvidersRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The summary of the identity provider.

                                                                      + */ +export interface IdentityProviderSummary { + /** + *

                                                                      The ARN of the identity provider.

                                                                      + */ + identityProviderArn?: string; + + /** + *

                                                                      The identity provider name.

                                                                      + */ + identityProviderName?: string; + + /** + *

                                                                      The identity provider type.

                                                                      + */ + identityProviderType?: IdentityProviderType | string; +} + +export namespace IdentityProviderSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: IdentityProviderSummary): any => ({ + ...obj, + }); +} + +export interface ListIdentityProvidersResponse { + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; + + /** + *

                                                                      The identity providers.

                                                                      + */ + identityProviders?: IdentityProviderSummary[]; +} + +export namespace ListIdentityProvidersResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListIdentityProvidersResponse): any => ({ + ...obj, + }); +} + +export interface ListNetworkSettingsRequest { + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; + + /** + *

                                                                      The maximum number of results to be included in the next page.

                                                                      + */ + maxResults?: number; +} + +export namespace ListNetworkSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListNetworkSettingsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The summary of network settings.

                                                                      + */ +export interface NetworkSettingsSummary { + /** + *

                                                                      The ARN of the network settings.

                                                                      + */ + networkSettingsArn?: string; + + /** + *

                                                                      The VPC ID of the network settings.

                                                                      + */ + vpcId?: string; +} + +export namespace NetworkSettingsSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: NetworkSettingsSummary): any => ({ + ...obj, + }); +} + +export interface ListNetworkSettingsResponse { + /** + *

                                                                      The network settings.

                                                                      + */ + networkSettings?: NetworkSettingsSummary[]; + + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; +} + +export namespace ListNetworkSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListNetworkSettingsResponse): any => ({ + ...obj, + }); +} + +export interface ListPortalsRequest { + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; + + /** + *

                                                                      The maximum number of results to be included in the next page.

                                                                      + */ + maxResults?: number; +} + +export namespace ListPortalsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPortalsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The summary of the portal.

                                                                      + */ +export interface PortalSummary { + /** + *

                                                                      The ARN of the web portal.

                                                                      + */ + portalArn?: string; + + /** + *

                                                                      The renderer that is used in streaming sessions.

                                                                      + */ + rendererType?: RendererType | string; + + /** + *

                                                                      The browser type of the web portal.

                                                                      + */ + browserType?: BrowserType | string; + + /** + *

                                                                      The status of the web portal.

                                                                      + */ + portalStatus?: PortalStatus | string; + + /** + *

                                                                      The endpoint URL of the web portal that users access in order to start streaming + * sessions.

                                                                      + */ + portalEndpoint?: string; + + /** + *

                                                                      The name of the web portal.

                                                                      + */ + displayName?: string; + + /** + *

                                                                      The creation date of the web portal.

                                                                      + */ + creationDate?: Date; + + /** + *

                                                                      The ARN of the browser settings that is associated with the web portal.

                                                                      + */ + browserSettingsArn?: string; + + /** + *

                                                                      The ARN of the user settings that is associated with the web portal.

                                                                      + */ + userSettingsArn?: string; + + /** + *

                                                                      The ARN of the network settings that is associated with the web portal.

                                                                      + */ + networkSettingsArn?: string; + + /** + *

The ARN of the trust store that is associated with this web portal.

                                                                      + */ + trustStoreArn?: string; +} + +export namespace PortalSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: PortalSummary): any => ({ + ...obj, + }); +} + +export interface ListPortalsResponse { + /** + *

                                                                      The portals in the list.

                                                                      + */ + portals?: PortalSummary[]; + + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; +} + +export namespace ListPortalsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListPortalsResponse): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceRequest { + /** + *

                                                                      The ARN of the resource.

                                                                      + */ + resourceArn: string | undefined; +} + +export namespace ListTagsForResourceRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceRequest): any => ({ + ...obj, + }); +} + +export interface ListTagsForResourceResponse { + /** + *

                                                                      The tags of the resource.

                                                                      + */ + tags?: Tag[]; +} + +export namespace ListTagsForResourceResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTagsForResourceResponse): any => ({ + ...obj, + }); +} + +export interface ListTrustStoreCertificatesRequest { + /** + *

The ARN of the trust store.

                                                                      + */ + trustStoreArn: string | undefined; + + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; + + /** + *

                                                                      The maximum number of results to be included in the next page.

                                                                      + */ + maxResults?: number; +} + +export namespace ListTrustStoreCertificatesRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTrustStoreCertificatesRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The summary of the certificate.

                                                                      + */ +export interface CertificateSummary { + /** + *

                                                                      A hexadecimal identifier for the certificate.

                                                                      + */ + thumbprint?: string; + + /** + *

                                                                      The entity the certificate belongs to.

                                                                      + */ + subject?: string; + + /** + *

                                                                      The entity that issued the certificate.

                                                                      + */ + issuer?: string; + + /** + *

                                                                      The certificate is not valid before this date.

                                                                      + */ + notValidBefore?: Date; + + /** + *

                                                                      The certificate is not valid after this date.

                                                                      + */ + notValidAfter?: Date; +} + +export namespace CertificateSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: CertificateSummary): any => ({ + ...obj, + }); +} + +export interface ListTrustStoreCertificatesResponse { + /** + *

                                                                      The certificate list.

                                                                      + */ + certificateList?: CertificateSummary[]; + + /** + *

                                                                      The ARN of the trust store.

                                                                      + */ + trustStoreArn?: string; + + /** + *

The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; +} + +export namespace ListTrustStoreCertificatesResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTrustStoreCertificatesResponse): any => ({ + ...obj, + }); +} + +export interface ListTrustStoresRequest { + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; + + /** + *

                                                                      The maximum number of results to be included in the next page.

                                                                      + */ + maxResults?: number; +} + +export namespace ListTrustStoresRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTrustStoresRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The summary of the trust store.

                                                                      + */ +export interface TrustStoreSummary { + /** + *

                                                                      The ARN of the trust store.

                                                                      + */ + trustStoreArn?: string; +} + +export namespace TrustStoreSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: TrustStoreSummary): any => ({ + ...obj, + }); +} + +export interface ListTrustStoresResponse { + /** + *

                                                                      The trust stores.

                                                                      + */ + trustStores?: TrustStoreSummary[]; + + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; +} + +export namespace ListTrustStoresResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListTrustStoresResponse): any => ({ + ...obj, + }); +} + +export interface ListUserSettingsRequest { + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; + + /** + *

                                                                      The maximum number of results to be included in the next page.

                                                                      + */ + maxResults?: number; +} + +export namespace ListUserSettingsRequest { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListUserSettingsRequest): any => ({ + ...obj, + }); +} + +/** + *

                                                                      The summary of user settings.

                                                                      + */ +export interface UserSettingsSummary { + /** + *

                                                                      The ARN of the user settings.

                                                                      + */ + userSettingsArn?: string; + + /** + *

                                                                      Specifies whether the user can copy text from the streaming session to the local + * device.

                                                                      + */ + copyAllowed?: EnabledType | string; + + /** + *

                                                                      Specifies whether the user can paste text from the local device to the streaming + * session.

                                                                      + */ + pasteAllowed?: EnabledType | string; + + /** + *

                                                                      Specifies whether the user can download files from the streaming session to the local + * device.

                                                                      + */ + downloadAllowed?: EnabledType | string; + + /** + *

                                                                      Specifies whether the user can upload files from the local device to the streaming + * session.

                                                                      + */ + uploadAllowed?: EnabledType | string; + + /** + *

                                                                      Specifies whether the user can print to the local device.

                                                                      + */ + printAllowed?: EnabledType | string; +} + +export namespace UserSettingsSummary { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UserSettingsSummary): any => ({ + ...obj, + }); +} + +export interface ListUserSettingsResponse { + /** + *

                                                                      The user settings.

                                                                      + */ + userSettings?: UserSettingsSummary[]; + + /** + *

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      + */ + nextToken?: string; +} + +export namespace ListUserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: ListUserSettingsResponse): any => ({ + ...obj, + }); +} + +export interface TagResourceRequest { + /** + *

+export interface TagResourceRequest {
+  /**
+   * <p>The ARN of the resource.</p>
+   */
+  resourceArn: string | undefined;
+
+  /**
+   * <p>The tags of the resource.</p>
+   */
+  tags: Tag[] | undefined;
+
+  /**
+   * <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.</p>
+   * <p>If you do not specify a client token, one is automatically generated by the AWS SDK.</p>
+   */
+  clientToken?: string;
+}
+
+export namespace TagResourceRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: TagResourceRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface TagResourceResponse {}
+
+export namespace TagResourceResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: TagResourceResponse): any => ({
+    ...obj,
+  });
+}
+
+/**

+ * <p>There are too many tags.</p>
+ */
+export interface TooManyTagsException extends __SmithyException, $MetadataBearer {
+  name: "TooManyTagsException";
+  $fault: "client";
+  message?: string;
+  /**
+   * <p>Name of the resource affected.</p>
+   */
+  resourceName?: string;
+}
+
+export namespace TooManyTagsException {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: TooManyTagsException): any => ({
+    ...obj,
+  });
+}
+
+export interface UntagResourceRequest {
+  /**

+   * <p>The ARN of the resource.</p>
+   */
+  resourceArn: string | undefined;
+
+  /**
+   * <p>The list of tag keys to remove from the resource.</p>
+   */
+  tagKeys: string[] | undefined;
+}
+
+export namespace UntagResourceRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UntagResourceRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface UntagResourceResponse {}
+
+export namespace UntagResourceResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UntagResourceResponse): any => ({
+    ...obj,
+  });
+}

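A hedged sketch of the tagging round trip defined by the request shapes above; the ARN, the tag contents, and the Key/Value member casing of Tag are assumptions, and the explicit clientToken only serves to make the tag call safely retryable.

import {
  WorkSpacesWebClient,
  TagResourceCommand,
  UntagResourceCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" }); // placeholder region

async function retagPortal(portalArn: string): Promise<void> {
  // Idempotent tag call: retries with the same clientToken return the original result.
  await client.send(
    new TagResourceCommand({
      resourceArn: portalArn,
      tags: [{ Key: "team", Value: "desktops" }], // member casing assumed from the service model
      clientToken: "tag-portal-2021-11-30",
    })
  );
  // Remove the tag again by key.
  await client.send(new UntagResourceCommand({ resourceArn: portalArn, tagKeys: ["team"] }));
}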
+export interface UpdateBrowserSettingsRequest {
+  /**
+   * <p>The ARN of the browser settings.</p>
+   */
+  browserSettingsArn: string | undefined;
+
+  /**
+   * <p>A JSON string containing Chrome Enterprise policies that will be applied to all streaming sessions.</p>
+   */
+  browserPolicy?: string;
+
+  /**
+   * <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.</p>
+   * <p>If you do not specify a client token, one is automatically generated by the AWS SDK.</p>
+   */
+  clientToken?: string;
+}
+
+export namespace UpdateBrowserSettingsRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateBrowserSettingsRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateBrowserSettingsResponse {
+  /**

+   * <p>The browser settings.</p>
+   */
+  browserSettings: BrowserSettings | undefined;
+}
+
+export namespace UpdateBrowserSettingsResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateBrowserSettingsResponse): any => ({
+    ...obj,
+  });
+}

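A sketch of pushing an updated browserPolicy through UpdateBrowserSettingsCommand; the Chrome Enterprise policy document shown is only illustrative (the service model above requires nothing more than a JSON string), and the region is a placeholder.

import { WorkSpacesWebClient, UpdateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" }); // placeholder region

async function applyDownloadRestriction(browserSettingsArn: string) {
  // Illustrative policy body; structure assumed, not defined by this patch.
  const browserPolicy = JSON.stringify({
    chromePolicies: { DownloadRestrictions: { value: 3 } },
  });
  const { browserSettings } = await client.send(
    new UpdateBrowserSettingsCommand({ browserSettingsArn, browserPolicy })
  );
  return browserSettings;
}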
+export interface UpdateIdentityProviderRequest {
+  /**
+   * <p>The ARN of the identity provider.</p>
+   */
+  identityProviderArn: string | undefined;
+
+  /**
+   * <p>The name of the identity provider.</p>
+   */
+  identityProviderName?: string;
+
+  /**
+   * <p>The type of the identity provider.</p>
+   */
+  identityProviderType?: IdentityProviderType | string;
+
+  /**
+   * <p>The details of the identity provider.</p>
+   */
+  identityProviderDetails?: { [key: string]: string };
+
+  /**
+   * <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.</p>
+   * <p>If you do not specify a client token, one is automatically generated by the AWS SDK.</p>
+   */
+  clientToken?: string;
+}
+
+export namespace UpdateIdentityProviderRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateIdentityProviderRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateIdentityProviderResponse {
+  /**

+   * <p>The identity provider.</p>
+   */
+  identityProvider: IdentityProvider | undefined;
+}
+
+export namespace UpdateIdentityProviderResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateIdentityProviderResponse): any => ({
+    ...obj,
+  });
+}

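A sketch of a partial update through UpdateIdentityProviderCommand, leaning on the fact that every field above except the ARN is optional; the provider name and region are placeholders.

import { WorkSpacesWebClient, UpdateIdentityProviderCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" }); // placeholder region

async function renameIdentityProvider(identityProviderArn: string) {
  // Only the fields being changed need to be supplied.
  const { identityProvider } = await client.send(
    new UpdateIdentityProviderCommand({
      identityProviderArn,
      identityProviderName: "corp-saml", // placeholder name
    })
  );
  return identityProvider;
}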
+export interface UpdateNetworkSettingsRequest {
+  /**
+   * <p>The ARN of the network settings.</p>
+   */
+  networkSettingsArn: string | undefined;
+
+  /**
+   * <p>The VPC that streaming instances will connect to.</p>
+   */
+  vpcId?: string;
+
+  /**
+   * <p>The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different availability zones.</p>
+   */
+  subnetIds?: string[];
+
+  /**
+   * <p>One or more security groups used to control access from streaming instances to your VPC.</p>
+   */
+  securityGroupIds?: string[];
+
+  /**
+   * <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.</p>
+   * <p>If you do not specify a client token, one is automatically generated by the AWS SDK.</p>
+   */
+  clientToken?: string;
+}
+
+export namespace UpdateNetworkSettingsRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateNetworkSettingsRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateNetworkSettingsResponse {
+  /**

+   * <p>The network settings.</p>
+   */
+  networkSettings: NetworkSettings | undefined;
+}
+
+export namespace UpdateNetworkSettingsResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateNetworkSettingsResponse): any => ({
+    ...obj,
+  });
+}

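A sketch of UpdateNetworkSettingsCommand that respects the two-Availability-Zone subnet requirement documented above; all resource IDs and the region are placeholders.

import { WorkSpacesWebClient, UpdateNetworkSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" }); // placeholder region

async function moveStreamingSubnets(networkSettingsArn: string) {
  const { networkSettings } = await client.send(
    new UpdateNetworkSettingsCommand({
      networkSettingsArn,
      vpcId: "vpc-0123456789abcdef0", // placeholder VPC
      subnetIds: ["subnet-aaaa1111", "subnet-bbbb2222"], // two subnets in different AZs
      securityGroupIds: ["sg-0123456789abcdef0"], // placeholder security group
    })
  );
  return networkSettings;
}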
+export interface UpdatePortalRequest {
+  /**
+   * <p>The ARN of the web portal.</p>
+   */
+  portalArn: string | undefined;
+
+  /**
+   * <p>The name of the web portal. This is not visible to users who log into the web portal.</p>
+   */
+  displayName?: string;
+}
+
+export namespace UpdatePortalRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdatePortalRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdatePortalResponse {
+  /**

+   * <p>The web portal.</p>
+   */
+  portal?: Portal;
+}
+
+export namespace UpdatePortalResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdatePortalResponse): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateTrustStoreRequest {
+  /**

+   * <p>The ARN of the trust store.</p>
+   */
+  trustStoreArn: string | undefined;
+
+  /**
+   * <p>A list of CA certificates to add to the trust store.</p>
+   */
+  certificatesToAdd?: Uint8Array[];
+
+  /**
+   * <p>A list of CA certificates to delete from a trust store.</p>
+   */
+  certificatesToDelete?: string[];
+
+  /**
+   * <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.</p>
+   * <p>If you do not specify a client token, one is automatically generated by the AWS SDK.</p>
+   */
+  clientToken?: string;
+}
+
+export namespace UpdateTrustStoreRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateTrustStoreRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateTrustStoreResponse {
+  /**

+   * <p>The ARN of the trust store.</p>
+   */
+  trustStoreArn: string | undefined;
+}
+
+export namespace UpdateTrustStoreResponse {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateTrustStoreResponse): any => ({
+    ...obj,
+  });
+}

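A sketch of rotating a CA certificate with UpdateTrustStoreCommand; the file path and the identifier passed to certificatesToDelete are assumptions (the model above only says those entries are strings), and the region is a placeholder.

import { readFile } from "fs/promises";
import { WorkSpacesWebClient, UpdateTrustStoreCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" }); // placeholder region

async function rotateRootCa(trustStoreArn: string) {
  const newCa = await readFile("./new-root-ca.pem"); // placeholder path; Buffer satisfies Uint8Array
  const { trustStoreArn: updatedArn } = await client.send(
    new UpdateTrustStoreCommand({
      trustStoreArn,
      certificatesToAdd: [newCa],
      certificatesToDelete: ["previous-certificate-id"], // placeholder identifier
    })
  );
  return updatedArn;
}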
+export interface UpdateUserSettingsRequest {
+  /**
+   * <p>The ARN of the user settings.</p>
+   */
+  userSettingsArn: string | undefined;
+
+  /**
+   * <p>Specifies whether the user can copy text from the streaming session to the local device.</p>
+   */
+  copyAllowed?: EnabledType | string;
+
+  /**
+   * <p>Specifies whether the user can paste text from the local device to the streaming session.</p>
+   */
+  pasteAllowed?: EnabledType | string;
+
+  /**
+   * <p>Specifies whether the user can download files from the streaming session to the local device.</p>
+   */
+  downloadAllowed?: EnabledType | string;
+
+  /**
+   * <p>Specifies whether the user can upload files from the local device to the streaming session.</p>
+   */
+  uploadAllowed?: EnabledType | string;
+
+  /**
+   * <p>Specifies whether the user can print to the local device.</p>
+   */
+  printAllowed?: EnabledType | string;
+
+  /**
+   * <p>A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.</p>
+   * <p>If you do not specify a client token, one is automatically generated by the AWS SDK.</p>
+   */
+  clientToken?: string;
+}
+
+export namespace UpdateUserSettingsRequest {
+  /**
+   * @internal
+   */
+  export const filterSensitiveLog = (obj: UpdateUserSettingsRequest): any => ({
+    ...obj,
+  });
+}
+
+export interface UpdateUserSettingsResponse {
+  /**
+   * <p>The user settings.</p>

                                                                      + */ + userSettings: UserSettings | undefined; +} + +export namespace UpdateUserSettingsResponse { + /** + * @internal + */ + export const filterSensitiveLog = (obj: UpdateUserSettingsResponse): any => ({ + ...obj, + }); +} diff --git a/clients/client-workspaces-web/src/pagination/Interfaces.ts b/clients/client-workspaces-web/src/pagination/Interfaces.ts new file mode 100644 index 000000000000..ee188427a395 --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/Interfaces.ts @@ -0,0 +1,8 @@ +import { PaginationConfiguration } from "@aws-sdk/types"; + +import { WorkSpacesWeb } from "../WorkSpacesWeb"; +import { WorkSpacesWebClient } from "../WorkSpacesWebClient"; + +export interface WorkSpacesWebPaginationConfiguration extends PaginationConfiguration { + client: WorkSpacesWeb | WorkSpacesWebClient; +} diff --git a/clients/client-workspaces-web/src/pagination/ListBrowserSettingsPaginator.ts b/clients/client-workspaces-web/src/pagination/ListBrowserSettingsPaginator.ts new file mode 100644 index 000000000000..b43f6a67071b --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/ListBrowserSettingsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListBrowserSettingsCommand, + ListBrowserSettingsCommandInput, + ListBrowserSettingsCommandOutput, +} from "../commands/ListBrowserSettingsCommand"; +import { WorkSpacesWeb } from "../WorkSpacesWeb"; +import { WorkSpacesWebClient } from "../WorkSpacesWebClient"; +import { WorkSpacesWebPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: WorkSpacesWebClient, + input: ListBrowserSettingsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListBrowserSettingsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: WorkSpacesWeb, + input: ListBrowserSettingsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listBrowserSettings(input, ...args); +}; +export async function* paginateListBrowserSettings( + config: WorkSpacesWebPaginationConfiguration, + input: ListBrowserSettingsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListBrowserSettingsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof WorkSpacesWeb) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof WorkSpacesWebClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected WorkSpacesWeb | WorkSpacesWebClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-workspaces-web/src/pagination/ListIdentityProvidersPaginator.ts b/clients/client-workspaces-web/src/pagination/ListIdentityProvidersPaginator.ts new file mode 100644 index 000000000000..60ac4b9eb7f2 --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/ListIdentityProvidersPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListIdentityProvidersCommand, + 
ListIdentityProvidersCommandInput, + ListIdentityProvidersCommandOutput, +} from "../commands/ListIdentityProvidersCommand"; +import { WorkSpacesWeb } from "../WorkSpacesWeb"; +import { WorkSpacesWebClient } from "../WorkSpacesWebClient"; +import { WorkSpacesWebPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: WorkSpacesWebClient, + input: ListIdentityProvidersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListIdentityProvidersCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: WorkSpacesWeb, + input: ListIdentityProvidersCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listIdentityProviders(input, ...args); +}; +export async function* paginateListIdentityProviders( + config: WorkSpacesWebPaginationConfiguration, + input: ListIdentityProvidersCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListIdentityProvidersCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof WorkSpacesWeb) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof WorkSpacesWebClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected WorkSpacesWeb | WorkSpacesWebClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-workspaces-web/src/pagination/ListNetworkSettingsPaginator.ts b/clients/client-workspaces-web/src/pagination/ListNetworkSettingsPaginator.ts new file mode 100644 index 000000000000..6697dd0e8b19 --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/ListNetworkSettingsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListNetworkSettingsCommand, + ListNetworkSettingsCommandInput, + ListNetworkSettingsCommandOutput, +} from "../commands/ListNetworkSettingsCommand"; +import { WorkSpacesWeb } from "../WorkSpacesWeb"; +import { WorkSpacesWebClient } from "../WorkSpacesWebClient"; +import { WorkSpacesWebPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: WorkSpacesWebClient, + input: ListNetworkSettingsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListNetworkSettingsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: WorkSpacesWeb, + input: ListNetworkSettingsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listNetworkSettings(input, ...args); +}; +export async function* paginateListNetworkSettings( + config: WorkSpacesWebPaginationConfiguration, + input: ListNetworkSettingsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListNetworkSettingsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client 
instanceof WorkSpacesWeb) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof WorkSpacesWebClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected WorkSpacesWeb | WorkSpacesWebClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-workspaces-web/src/pagination/ListPortalsPaginator.ts b/clients/client-workspaces-web/src/pagination/ListPortalsPaginator.ts new file mode 100644 index 000000000000..2339d088f304 --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/ListPortalsPaginator.ts @@ -0,0 +1,55 @@ +import { Paginator } from "@aws-sdk/types"; + +import { ListPortalsCommand, ListPortalsCommandInput, ListPortalsCommandOutput } from "../commands/ListPortalsCommand"; +import { WorkSpacesWeb } from "../WorkSpacesWeb"; +import { WorkSpacesWebClient } from "../WorkSpacesWebClient"; +import { WorkSpacesWebPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: WorkSpacesWebClient, + input: ListPortalsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListPortalsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: WorkSpacesWeb, + input: ListPortalsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listPortals(input, ...args); +}; +export async function* paginateListPortals( + config: WorkSpacesWebPaginationConfiguration, + input: ListPortalsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListPortalsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof WorkSpacesWeb) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof WorkSpacesWebClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected WorkSpacesWeb | WorkSpacesWebClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-workspaces-web/src/pagination/ListTrustStoreCertificatesPaginator.ts b/clients/client-workspaces-web/src/pagination/ListTrustStoreCertificatesPaginator.ts new file mode 100644 index 000000000000..1b13b364711c --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/ListTrustStoreCertificatesPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListTrustStoreCertificatesCommand, + ListTrustStoreCertificatesCommandInput, + ListTrustStoreCertificatesCommandOutput, +} from "../commands/ListTrustStoreCertificatesCommand"; +import { WorkSpacesWeb } from "../WorkSpacesWeb"; +import { WorkSpacesWebClient } from "../WorkSpacesWebClient"; +import { WorkSpacesWebPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: WorkSpacesWebClient, + input: ListTrustStoreCertificatesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new 
ListTrustStoreCertificatesCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: WorkSpacesWeb, + input: ListTrustStoreCertificatesCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listTrustStoreCertificates(input, ...args); +}; +export async function* paginateListTrustStoreCertificates( + config: WorkSpacesWebPaginationConfiguration, + input: ListTrustStoreCertificatesCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListTrustStoreCertificatesCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof WorkSpacesWeb) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof WorkSpacesWebClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected WorkSpacesWeb | WorkSpacesWebClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-workspaces-web/src/pagination/ListTrustStoresPaginator.ts b/clients/client-workspaces-web/src/pagination/ListTrustStoresPaginator.ts new file mode 100644 index 000000000000..73a8e27f008a --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/ListTrustStoresPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListTrustStoresCommand, + ListTrustStoresCommandInput, + ListTrustStoresCommandOutput, +} from "../commands/ListTrustStoresCommand"; +import { WorkSpacesWeb } from "../WorkSpacesWeb"; +import { WorkSpacesWebClient } from "../WorkSpacesWebClient"; +import { WorkSpacesWebPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: WorkSpacesWebClient, + input: ListTrustStoresCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListTrustStoresCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: WorkSpacesWeb, + input: ListTrustStoresCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listTrustStores(input, ...args); +}; +export async function* paginateListTrustStores( + config: WorkSpacesWebPaginationConfiguration, + input: ListTrustStoresCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListTrustStoresCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof WorkSpacesWeb) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof WorkSpacesWebClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected WorkSpacesWeb | WorkSpacesWebClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-workspaces-web/src/pagination/ListUserSettingsPaginator.ts 
b/clients/client-workspaces-web/src/pagination/ListUserSettingsPaginator.ts new file mode 100644 index 000000000000..e3c4c6f6d807 --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/ListUserSettingsPaginator.ts @@ -0,0 +1,59 @@ +import { Paginator } from "@aws-sdk/types"; + +import { + ListUserSettingsCommand, + ListUserSettingsCommandInput, + ListUserSettingsCommandOutput, +} from "../commands/ListUserSettingsCommand"; +import { WorkSpacesWeb } from "../WorkSpacesWeb"; +import { WorkSpacesWebClient } from "../WorkSpacesWebClient"; +import { WorkSpacesWebPaginationConfiguration } from "./Interfaces"; + +/** + * @private + */ +const makePagedClientRequest = async ( + client: WorkSpacesWebClient, + input: ListUserSettingsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.send(new ListUserSettingsCommand(input), ...args); +}; +/** + * @private + */ +const makePagedRequest = async ( + client: WorkSpacesWeb, + input: ListUserSettingsCommandInput, + ...args: any +): Promise => { + // @ts-ignore + return await client.listUserSettings(input, ...args); +}; +export async function* paginateListUserSettings( + config: WorkSpacesWebPaginationConfiguration, + input: ListUserSettingsCommandInput, + ...additionalArguments: any +): Paginator { + // ToDo: replace with actual type instead of typeof input.nextToken + let token: typeof input.nextToken | undefined = config.startingToken || undefined; + let hasNext = true; + let page: ListUserSettingsCommandOutput; + while (hasNext) { + input.nextToken = token; + input["maxResults"] = config.pageSize; + if (config.client instanceof WorkSpacesWeb) { + page = await makePagedRequest(config.client, input, ...additionalArguments); + } else if (config.client instanceof WorkSpacesWebClient) { + page = await makePagedClientRequest(config.client, input, ...additionalArguments); + } else { + throw new Error("Invalid client, expected WorkSpacesWeb | WorkSpacesWebClient"); + } + yield page; + token = page.nextToken; + hasNext = !!token; + } + // @ts-ignore + return undefined; +} diff --git a/clients/client-workspaces-web/src/pagination/index.ts b/clients/client-workspaces-web/src/pagination/index.ts new file mode 100644 index 000000000000..ec01682e0293 --- /dev/null +++ b/clients/client-workspaces-web/src/pagination/index.ts @@ -0,0 +1,8 @@ +export * from "./Interfaces"; +export * from "./ListBrowserSettingsPaginator"; +export * from "./ListIdentityProvidersPaginator"; +export * from "./ListNetworkSettingsPaginator"; +export * from "./ListPortalsPaginator"; +export * from "./ListTrustStoreCertificatesPaginator"; +export * from "./ListTrustStoresPaginator"; +export * from "./ListUserSettingsPaginator"; diff --git a/clients/client-workspaces-web/src/protocols/Aws_restJson1.ts b/clients/client-workspaces-web/src/protocols/Aws_restJson1.ts new file mode 100644 index 000000000000..197b860236b4 --- /dev/null +++ b/clients/client-workspaces-web/src/protocols/Aws_restJson1.ts @@ -0,0 +1,6344 @@ +import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http"; +import { + expectNonNull as __expectNonNull, + expectNumber as __expectNumber, + expectObject as __expectObject, + expectString as __expectString, + extendedEncodeURIComponent as __extendedEncodeURIComponent, + parseEpochTimestamp as __parseEpochTimestamp, + strictParseInt32 as __strictParseInt32, +} from "@aws-sdk/smithy-client"; +import { + Endpoint as __Endpoint, + MetadataBearer as __MetadataBearer, + ResponseMetadata as 
__ResponseMetadata, + SerdeContext as __SerdeContext, + SmithyException as __SmithyException, +} from "@aws-sdk/types"; +import { v4 as generateIdempotencyToken } from "uuid"; + +import { + AssociateBrowserSettingsCommandInput, + AssociateBrowserSettingsCommandOutput, +} from "../commands/AssociateBrowserSettingsCommand"; +import { + AssociateNetworkSettingsCommandInput, + AssociateNetworkSettingsCommandOutput, +} from "../commands/AssociateNetworkSettingsCommand"; +import { + AssociateTrustStoreCommandInput, + AssociateTrustStoreCommandOutput, +} from "../commands/AssociateTrustStoreCommand"; +import { + AssociateUserSettingsCommandInput, + AssociateUserSettingsCommandOutput, +} from "../commands/AssociateUserSettingsCommand"; +import { + CreateBrowserSettingsCommandInput, + CreateBrowserSettingsCommandOutput, +} from "../commands/CreateBrowserSettingsCommand"; +import { + CreateIdentityProviderCommandInput, + CreateIdentityProviderCommandOutput, +} from "../commands/CreateIdentityProviderCommand"; +import { + CreateNetworkSettingsCommandInput, + CreateNetworkSettingsCommandOutput, +} from "../commands/CreateNetworkSettingsCommand"; +import { CreatePortalCommandInput, CreatePortalCommandOutput } from "../commands/CreatePortalCommand"; +import { CreateTrustStoreCommandInput, CreateTrustStoreCommandOutput } from "../commands/CreateTrustStoreCommand"; +import { CreateUserSettingsCommandInput, CreateUserSettingsCommandOutput } from "../commands/CreateUserSettingsCommand"; +import { + DeleteBrowserSettingsCommandInput, + DeleteBrowserSettingsCommandOutput, +} from "../commands/DeleteBrowserSettingsCommand"; +import { + DeleteIdentityProviderCommandInput, + DeleteIdentityProviderCommandOutput, +} from "../commands/DeleteIdentityProviderCommand"; +import { + DeleteNetworkSettingsCommandInput, + DeleteNetworkSettingsCommandOutput, +} from "../commands/DeleteNetworkSettingsCommand"; +import { DeletePortalCommandInput, DeletePortalCommandOutput } from "../commands/DeletePortalCommand"; +import { DeleteTrustStoreCommandInput, DeleteTrustStoreCommandOutput } from "../commands/DeleteTrustStoreCommand"; +import { DeleteUserSettingsCommandInput, DeleteUserSettingsCommandOutput } from "../commands/DeleteUserSettingsCommand"; +import { + DisassociateBrowserSettingsCommandInput, + DisassociateBrowserSettingsCommandOutput, +} from "../commands/DisassociateBrowserSettingsCommand"; +import { + DisassociateNetworkSettingsCommandInput, + DisassociateNetworkSettingsCommandOutput, +} from "../commands/DisassociateNetworkSettingsCommand"; +import { + DisassociateTrustStoreCommandInput, + DisassociateTrustStoreCommandOutput, +} from "../commands/DisassociateTrustStoreCommand"; +import { + DisassociateUserSettingsCommandInput, + DisassociateUserSettingsCommandOutput, +} from "../commands/DisassociateUserSettingsCommand"; +import { GetBrowserSettingsCommandInput, GetBrowserSettingsCommandOutput } from "../commands/GetBrowserSettingsCommand"; +import { + GetIdentityProviderCommandInput, + GetIdentityProviderCommandOutput, +} from "../commands/GetIdentityProviderCommand"; +import { GetNetworkSettingsCommandInput, GetNetworkSettingsCommandOutput } from "../commands/GetNetworkSettingsCommand"; +import { GetPortalCommandInput, GetPortalCommandOutput } from "../commands/GetPortalCommand"; +import { + GetPortalServiceProviderMetadataCommandInput, + GetPortalServiceProviderMetadataCommandOutput, +} from "../commands/GetPortalServiceProviderMetadataCommand"; +import { + GetTrustStoreCertificateCommandInput, + 
GetTrustStoreCertificateCommandOutput, +} from "../commands/GetTrustStoreCertificateCommand"; +import { GetTrustStoreCommandInput, GetTrustStoreCommandOutput } from "../commands/GetTrustStoreCommand"; +import { GetUserSettingsCommandInput, GetUserSettingsCommandOutput } from "../commands/GetUserSettingsCommand"; +import { + ListBrowserSettingsCommandInput, + ListBrowserSettingsCommandOutput, +} from "../commands/ListBrowserSettingsCommand"; +import { + ListIdentityProvidersCommandInput, + ListIdentityProvidersCommandOutput, +} from "../commands/ListIdentityProvidersCommand"; +import { + ListNetworkSettingsCommandInput, + ListNetworkSettingsCommandOutput, +} from "../commands/ListNetworkSettingsCommand"; +import { ListPortalsCommandInput, ListPortalsCommandOutput } from "../commands/ListPortalsCommand"; +import { + ListTagsForResourceCommandInput, + ListTagsForResourceCommandOutput, +} from "../commands/ListTagsForResourceCommand"; +import { + ListTrustStoreCertificatesCommandInput, + ListTrustStoreCertificatesCommandOutput, +} from "../commands/ListTrustStoreCertificatesCommand"; +import { ListTrustStoresCommandInput, ListTrustStoresCommandOutput } from "../commands/ListTrustStoresCommand"; +import { ListUserSettingsCommandInput, ListUserSettingsCommandOutput } from "../commands/ListUserSettingsCommand"; +import { TagResourceCommandInput, TagResourceCommandOutput } from "../commands/TagResourceCommand"; +import { UntagResourceCommandInput, UntagResourceCommandOutput } from "../commands/UntagResourceCommand"; +import { + UpdateBrowserSettingsCommandInput, + UpdateBrowserSettingsCommandOutput, +} from "../commands/UpdateBrowserSettingsCommand"; +import { + UpdateIdentityProviderCommandInput, + UpdateIdentityProviderCommandOutput, +} from "../commands/UpdateIdentityProviderCommand"; +import { + UpdateNetworkSettingsCommandInput, + UpdateNetworkSettingsCommandOutput, +} from "../commands/UpdateNetworkSettingsCommand"; +import { UpdatePortalCommandInput, UpdatePortalCommandOutput } from "../commands/UpdatePortalCommand"; +import { UpdateTrustStoreCommandInput, UpdateTrustStoreCommandOutput } from "../commands/UpdateTrustStoreCommand"; +import { UpdateUserSettingsCommandInput, UpdateUserSettingsCommandOutput } from "../commands/UpdateUserSettingsCommand"; +import { + AccessDeniedException, + BrowserSettings, + BrowserSettingsSummary, + Certificate, + CertificateSummary, + ConflictException, + IdentityProvider, + IdentityProviderSummary, + InternalServerException, + NetworkSettings, + NetworkSettingsSummary, + Portal, + PortalSummary, + ResourceNotFoundException, + ServiceQuotaExceededException, + Tag, + ThrottlingException, + TooManyTagsException, + TrustStore, + TrustStoreSummary, + UserSettings, + UserSettingsSummary, + ValidationException, + ValidationExceptionField, +} from "../models/models_0"; + +export const serializeAws_restJson1AssociateBrowserSettingsCommand = async ( + input: AssociateBrowserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/browserSettings"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + const query: any = { + ...(input.browserSettingsArn !== undefined && { browserSettingsArn: input.browserSettingsArn }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1AssociateNetworkSettingsCommand = async ( + input: AssociateNetworkSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/networkSettings"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + const query: any = { + ...(input.networkSettingsArn !== undefined && { networkSettingsArn: input.networkSettingsArn }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1AssociateTrustStoreCommand = async ( + input: AssociateTrustStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/trustStores"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + const query: any = { + ...(input.trustStoreArn !== undefined && { trustStoreArn: input.trustStoreArn }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1AssociateUserSettingsCommand = async ( + input: AssociateUserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/userSettings"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + const query: any = { + ...(input.userSettingsArn !== undefined && { userSettingsArn: input.userSettingsArn }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1CreateBrowserSettingsCommand = async ( + input: CreateBrowserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/browserSettings"; + let body: any; + body = JSON.stringify({ + ...(input.additionalEncryptionContext !== undefined && + input.additionalEncryptionContext !== null && { + additionalEncryptionContext: serializeAws_restJson1EncryptionContextMap( + input.additionalEncryptionContext, + context + ), + }), + ...(input.browserPolicy !== undefined && input.browserPolicy !== null && { browserPolicy: input.browserPolicy }), + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.customerManagedKey !== undefined && + input.customerManagedKey !== null && { customerManagedKey: input.customerManagedKey }), + ...(input.tags !== undefined && + input.tags !== null && { tags: serializeAws_restJson1TagList(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateIdentityProviderCommand = async ( + input: CreateIdentityProviderCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/identityProviders"; + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? 
generateIdempotencyToken(), + ...(input.identityProviderDetails !== undefined && + input.identityProviderDetails !== null && { + identityProviderDetails: serializeAws_restJson1IdentityProviderDetails(input.identityProviderDetails, context), + }), + ...(input.identityProviderName !== undefined && + input.identityProviderName !== null && { identityProviderName: input.identityProviderName }), + ...(input.identityProviderType !== undefined && + input.identityProviderType !== null && { identityProviderType: input.identityProviderType }), + ...(input.portalArn !== undefined && input.portalArn !== null && { portalArn: input.portalArn }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateNetworkSettingsCommand = async ( + input: CreateNetworkSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/networkSettings"; + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.securityGroupIds !== undefined && + input.securityGroupIds !== null && { + securityGroupIds: serializeAws_restJson1SecurityGroupIdList(input.securityGroupIds, context), + }), + ...(input.subnetIds !== undefined && + input.subnetIds !== null && { subnetIds: serializeAws_restJson1SubnetIdList(input.subnetIds, context) }), + ...(input.tags !== undefined && + input.tags !== null && { tags: serializeAws_restJson1TagList(input.tags, context) }), + ...(input.vpcId !== undefined && input.vpcId !== null && { vpcId: input.vpcId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreatePortalCommand = async ( + input: CreatePortalCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals"; + let body: any; + body = JSON.stringify({ + ...(input.additionalEncryptionContext !== undefined && + input.additionalEncryptionContext !== null && { + additionalEncryptionContext: serializeAws_restJson1EncryptionContextMap( + input.additionalEncryptionContext, + context + ), + }), + clientToken: input.clientToken ?? 
generateIdempotencyToken(), + ...(input.customerManagedKey !== undefined && + input.customerManagedKey !== null && { customerManagedKey: input.customerManagedKey }), + ...(input.displayName !== undefined && input.displayName !== null && { displayName: input.displayName }), + ...(input.tags !== undefined && + input.tags !== null && { tags: serializeAws_restJson1TagList(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateTrustStoreCommand = async ( + input: CreateTrustStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/trustStores"; + let body: any; + body = JSON.stringify({ + ...(input.certificateList !== undefined && + input.certificateList !== null && { + certificateList: serializeAws_restJson1CertificateList(input.certificateList, context), + }), + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.tags !== undefined && + input.tags !== null && { tags: serializeAws_restJson1TagList(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1CreateUserSettingsCommand = async ( + input: CreateUserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/userSettings"; + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.copyAllowed !== undefined && input.copyAllowed !== null && { copyAllowed: input.copyAllowed }), + ...(input.downloadAllowed !== undefined && + input.downloadAllowed !== null && { downloadAllowed: input.downloadAllowed }), + ...(input.pasteAllowed !== undefined && input.pasteAllowed !== null && { pasteAllowed: input.pasteAllowed }), + ...(input.printAllowed !== undefined && input.printAllowed !== null && { printAllowed: input.printAllowed }), + ...(input.tags !== undefined && + input.tags !== null && { tags: serializeAws_restJson1TagList(input.tags, context) }), + ...(input.uploadAllowed !== undefined && input.uploadAllowed !== null && { uploadAllowed: input.uploadAllowed }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteBrowserSettingsCommand = async ( + input: DeleteBrowserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/browserSettings/{browserSettingsArn+}"; + if (input.browserSettingsArn !== undefined) { + const labelValue: string = input.browserSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: browserSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{browserSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: browserSettingsArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteIdentityProviderCommand = async ( + input: DeleteIdentityProviderCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/identityProviders/{identityProviderArn+}"; + if (input.identityProviderArn !== undefined) { + const labelValue: string = input.identityProviderArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: identityProviderArn."); + } + resolvedPath = resolvedPath.replace( + "{identityProviderArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: identityProviderArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteNetworkSettingsCommand = async ( + input: DeleteNetworkSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/networkSettings/{networkSettingsArn+}"; + if (input.networkSettingsArn !== undefined) { + const labelValue: string = input.networkSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: networkSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{networkSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: networkSettingsArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeletePortalCommand = async ( + input: DeletePortalCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteTrustStoreCommand = async ( + input: DeleteTrustStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/trustStores/{trustStoreArn+}"; + if (input.trustStoreArn !== undefined) { + const labelValue: string = input.trustStoreArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: trustStoreArn."); + } + resolvedPath = resolvedPath.replace( + "{trustStoreArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: trustStoreArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DeleteUserSettingsCommand = async ( + input: DeleteUserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/userSettings/{userSettingsArn+}"; + if (input.userSettingsArn !== undefined) { + const labelValue: string = input.userSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: userSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{userSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: userSettingsArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DisassociateBrowserSettingsCommand = async ( + input: DisassociateBrowserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/browserSettings"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DisassociateNetworkSettingsCommand = async ( + input: DisassociateNetworkSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/networkSettings"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DisassociateTrustStoreCommand = async ( + input: DisassociateTrustStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/trustStores"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1DisassociateUserSettingsCommand = async ( + input: DisassociateUserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/userSettings"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetBrowserSettingsCommand = async ( + input: GetBrowserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/browserSettings/{browserSettingsArn+}"; + if (input.browserSettingsArn !== undefined) { + const labelValue: string = input.browserSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: browserSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{browserSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: browserSettingsArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetIdentityProviderCommand = async ( + input: GetIdentityProviderCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/identityProviders/{identityProviderArn+}"; + if (input.identityProviderArn !== undefined) { + const labelValue: string = input.identityProviderArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: identityProviderArn."); + } + resolvedPath = resolvedPath.replace( + "{identityProviderArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: identityProviderArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetNetworkSettingsCommand = async ( + input: GetNetworkSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/networkSettings/{networkSettingsArn+}"; + if (input.networkSettingsArn !== undefined) { + const labelValue: string = input.networkSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: networkSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{networkSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: networkSettingsArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetPortalCommand = async ( + input: GetPortalCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetPortalServiceProviderMetadataCommand = async ( + input: GetPortalServiceProviderMetadataCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portalIdp/{portalArn+}"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetTrustStoreCommand = async ( + input: GetTrustStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/trustStores/{trustStoreArn+}"; + if (input.trustStoreArn !== undefined) { + const labelValue: string = input.trustStoreArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: trustStoreArn."); + } + resolvedPath = resolvedPath.replace( + "{trustStoreArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: trustStoreArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1GetTrustStoreCertificateCommand = async ( + input: GetTrustStoreCertificateCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/trustStores/{trustStoreArn+}/certificate"; + if (input.trustStoreArn !== undefined) { + const labelValue: string = input.trustStoreArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: trustStoreArn."); + } + resolvedPath = resolvedPath.replace( + "{trustStoreArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: trustStoreArn."); + } + const query: any = { + ...(input.thumbprint !== undefined && { thumbprint: input.thumbprint }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1GetUserSettingsCommand = async ( + input: GetUserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/userSettings/{userSettingsArn+}"; + if (input.userSettingsArn !== undefined) { + const labelValue: string = input.userSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: userSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{userSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: userSettingsArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListBrowserSettingsCommand = async ( + input: ListBrowserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? 
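// Illustrative sketch, not part of the generated patch: every serializer above resolves a
// greedy "{...Arn+}" label the same way -- reject an empty value, split the ARN on "/",
// percent-encode each segment, and rejoin with "/" so the ARN's own slashes survive as path
// separators. Here encodeURIComponent stands in for the SDK's __extendedEncodeURIComponent
// helper, and resolveGreedyArnLabel is a hypothetical name used only for this sketch.
const resolveGreedyArnLabel = (pathTemplate: string, label: string, arn: string): string => {
  if (arn.length <= 0) {
    throw new Error(`Empty value provided for input HTTP label: ${label}.`);
  }
  return pathTemplate.replace(
    `{${label}+}`,
    arn
      .split("/")
      .map((segment) => encodeURIComponent(segment))
      .join("/")
  );
};
// resolveGreedyArnLabel("/portals/{portalArn+}", "portalArn",
//   "arn:aws:workspaces-web:us-west-2:111122223333:portal/example")
// => "/portals/arn%3Aaws%3Aworkspaces-web%3Aus-west-2%3A111122223333%3Aportal/example"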
basePath.slice(0, -1) : basePath || ""}` + "/browserSettings"; + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListIdentityProvidersCommand = async ( + input: ListIdentityProvidersCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}/identityProviders"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListNetworkSettingsCommand = async ( + input: ListNetworkSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/networkSettings"; + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListPortalsCommand = async ( + input: ListPortalsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals"; + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListTagsForResourceCommand = async ( + input: ListTagsForResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn+}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace( + "{resourceArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1ListTrustStoreCertificatesCommand = async ( + input: ListTrustStoreCertificatesCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + + "/trustStores/{trustStoreArn+}/certificates"; + if (input.trustStoreArn !== undefined) { + const labelValue: string = input.trustStoreArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: trustStoreArn."); + } + resolvedPath = resolvedPath.replace( + "{trustStoreArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: trustStoreArn."); + } + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListTrustStoresCommand = async ( + input: ListTrustStoresCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/trustStores"; + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1ListUserSettingsCommand = async ( + input: ListUserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + const resolvedPath = `${basePath?.endsWith("/") ? 
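// Illustrative sketch, not part of the generated patch: the List* serializers above only put
// nextToken/maxResults on the query string when the caller supplies them, which is what makes
// token-based pagination work end to end. Assuming the generated exports added elsewhere in
// this change (WorkSpacesWebClient, ListBrowserSettingsCommand) and the response field names
// browserSettings/browserSettingsArn/nextToken, a caller could drain all pages roughly so:
import { WorkSpacesWebClient, ListBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web";

const listAllBrowserSettingsArns = async (client: WorkSpacesWebClient): Promise<string[]> => {
  const arns: string[] = [];
  let nextToken: string | undefined;
  do {
    // Each request carries the previous page's token; maxResults is optional.
    const page = await client.send(new ListBrowserSettingsCommand({ nextToken, maxResults: 25 }));
    for (const summary of page.browserSettings ?? []) {
      if (summary.browserSettingsArn) {
        arns.push(summary.browserSettingsArn);
      }
    }
    nextToken = page.nextToken;
  } while (nextToken !== undefined);
  return arns;
};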
basePath.slice(0, -1) : basePath || ""}` + "/userSettings"; + const query: any = { + ...(input.nextToken !== undefined && { nextToken: input.nextToken }), + ...(input.maxResults !== undefined && { maxResults: input.maxResults.toString() }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "GET", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1TagResourceCommand = async ( + input: TagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn+}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace( + "{resourceArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.tags !== undefined && + input.tags !== null && { tags: serializeAws_restJson1TagList(input.tags, context) }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "POST", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UntagResourceCommand = async ( + input: UntagResourceCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = {}; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/tags/{resourceArn+}"; + if (input.resourceArn !== undefined) { + const labelValue: string = input.resourceArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: resourceArn."); + } + resolvedPath = resolvedPath.replace( + "{resourceArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: resourceArn."); + } + const query: any = { + ...(input.tagKeys !== undefined && { tagKeys: (input.tagKeys || []).map((_entry) => _entry as any) }), + }; + let body: any; + return new __HttpRequest({ + protocol, + hostname, + port, + method: "DELETE", + headers, + path: resolvedPath, + query, + body, + }); +}; + +export const serializeAws_restJson1UpdateBrowserSettingsCommand = async ( + input: UpdateBrowserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/browserSettings/{browserSettingsArn+}"; + if (input.browserSettingsArn !== undefined) { + const labelValue: string = input.browserSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: browserSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{browserSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: browserSettingsArn."); + } + let body: any; + body = JSON.stringify({ + ...(input.browserPolicy !== undefined && input.browserPolicy !== null && { browserPolicy: input.browserPolicy }), + clientToken: input.clientToken ?? generateIdempotencyToken(), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateIdentityProviderCommand = async ( + input: UpdateIdentityProviderCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/identityProviders/{identityProviderArn+}"; + if (input.identityProviderArn !== undefined) { + const labelValue: string = input.identityProviderArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: identityProviderArn."); + } + resolvedPath = resolvedPath.replace( + "{identityProviderArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: identityProviderArn."); + } + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.identityProviderDetails !== undefined && + input.identityProviderDetails !== null && { + identityProviderDetails: serializeAws_restJson1IdentityProviderDetails(input.identityProviderDetails, context), + }), + ...(input.identityProviderName !== undefined && + input.identityProviderName !== null && { identityProviderName: input.identityProviderName }), + ...(input.identityProviderType !== undefined && + input.identityProviderType !== null && { identityProviderType: input.identityProviderType }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateNetworkSettingsCommand = async ( + input: UpdateNetworkSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
basePath.slice(0, -1) : basePath || ""}` + "/networkSettings/{networkSettingsArn+}"; + if (input.networkSettingsArn !== undefined) { + const labelValue: string = input.networkSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: networkSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{networkSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: networkSettingsArn."); + } + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? generateIdempotencyToken(), + ...(input.securityGroupIds !== undefined && + input.securityGroupIds !== null && { + securityGroupIds: serializeAws_restJson1SecurityGroupIdList(input.securityGroupIds, context), + }), + ...(input.subnetIds !== undefined && + input.subnetIds !== null && { subnetIds: serializeAws_restJson1SubnetIdList(input.subnetIds, context) }), + ...(input.vpcId !== undefined && input.vpcId !== null && { vpcId: input.vpcId }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdatePortalCommand = async ( + input: UpdatePortalCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/portals/{portalArn+}"; + if (input.portalArn !== undefined) { + const labelValue: string = input.portalArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: portalArn."); + } + resolvedPath = resolvedPath.replace( + "{portalArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: portalArn."); + } + let body: any; + body = JSON.stringify({ + ...(input.displayName !== undefined && input.displayName !== null && { displayName: input.displayName }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PUT", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateTrustStoreCommand = async ( + input: UpdateTrustStoreCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? 
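// Illustrative sketch, not part of the generated patch: the TagResource/Update* serializers
// above all build their JSON bodies with the same idiom -- spread a member into the payload
// only when it is neither undefined nor null, and default clientToken with "??" so retried
// requests stay idempotent. randomUUID stands in for the SDK's generateIdempotencyToken
// helper, and buildUpdateBrowserSettingsBody is a hypothetical name for this sketch.
import { randomUUID } from "crypto";

interface UpdateBrowserSettingsBodyInput {
  browserPolicy?: string;
  clientToken?: string;
}

const buildUpdateBrowserSettingsBody = (input: UpdateBrowserSettingsBodyInput): string =>
  JSON.stringify({
    // Omitted entirely when browserPolicy is undefined or null, matching the generated code.
    ...(input.browserPolicy !== undefined && input.browserPolicy !== null && { browserPolicy: input.browserPolicy }),
    // A fresh token is generated only when the caller did not pass one.
    clientToken: input.clientToken ?? randomUUID(),
  });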
basePath.slice(0, -1) : basePath || ""}` + "/trustStores/{trustStoreArn+}"; + if (input.trustStoreArn !== undefined) { + const labelValue: string = input.trustStoreArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: trustStoreArn."); + } + resolvedPath = resolvedPath.replace( + "{trustStoreArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: trustStoreArn."); + } + let body: any; + body = JSON.stringify({ + ...(input.certificatesToAdd !== undefined && + input.certificatesToAdd !== null && { + certificatesToAdd: serializeAws_restJson1CertificateList(input.certificatesToAdd, context), + }), + ...(input.certificatesToDelete !== undefined && + input.certificatesToDelete !== null && { + certificatesToDelete: serializeAws_restJson1CertificateThumbprintList(input.certificatesToDelete, context), + }), + clientToken: input.clientToken ?? generateIdempotencyToken(), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const serializeAws_restJson1UpdateUserSettingsCommand = async ( + input: UpdateUserSettingsCommandInput, + context: __SerdeContext +): Promise<__HttpRequest> => { + const { hostname, protocol = "https", port, path: basePath } = await context.endpoint(); + const headers: any = { + "content-type": "application/json", + }; + let resolvedPath = + `${basePath?.endsWith("/") ? basePath.slice(0, -1) : basePath || ""}` + "/userSettings/{userSettingsArn+}"; + if (input.userSettingsArn !== undefined) { + const labelValue: string = input.userSettingsArn; + if (labelValue.length <= 0) { + throw new Error("Empty value provided for input HTTP label: userSettingsArn."); + } + resolvedPath = resolvedPath.replace( + "{userSettingsArn+}", + labelValue + .split("/") + .map((segment) => __extendedEncodeURIComponent(segment)) + .join("/") + ); + } else { + throw new Error("No value provided for input HTTP label: userSettingsArn."); + } + let body: any; + body = JSON.stringify({ + clientToken: input.clientToken ?? 
generateIdempotencyToken(), + ...(input.copyAllowed !== undefined && input.copyAllowed !== null && { copyAllowed: input.copyAllowed }), + ...(input.downloadAllowed !== undefined && + input.downloadAllowed !== null && { downloadAllowed: input.downloadAllowed }), + ...(input.pasteAllowed !== undefined && input.pasteAllowed !== null && { pasteAllowed: input.pasteAllowed }), + ...(input.printAllowed !== undefined && input.printAllowed !== null && { printAllowed: input.printAllowed }), + ...(input.uploadAllowed !== undefined && input.uploadAllowed !== null && { uploadAllowed: input.uploadAllowed }), + }); + return new __HttpRequest({ + protocol, + hostname, + port, + method: "PATCH", + headers, + path: resolvedPath, + body, + }); +}; + +export const deserializeAws_restJson1AssociateBrowserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1AssociateBrowserSettingsCommandError(output, context); + } + const contents: AssociateBrowserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + browserSettingsArn: undefined, + portalArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.browserSettingsArn !== undefined && data.browserSettingsArn !== null) { + contents.browserSettingsArn = __expectString(data.browserSettingsArn); + } + if (data.portalArn !== undefined && data.portalArn !== null) { + contents.portalArn = __expectString(data.portalArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1AssociateBrowserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + 
response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1AssociateNetworkSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1AssociateNetworkSettingsCommandError(output, context); + } + const contents: AssociateNetworkSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + networkSettingsArn: undefined, + portalArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.networkSettingsArn !== undefined && data.networkSettingsArn !== null) { + contents.networkSettingsArn = __expectString(data.networkSettingsArn); + } + if (data.portalArn !== undefined && data.portalArn !== null) { + contents.portalArn = __expectString(data.portalArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1AssociateNetworkSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await 
deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1AssociateTrustStoreCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1AssociateTrustStoreCommandError(output, context); + } + const contents: AssociateTrustStoreCommandOutput = { + $metadata: deserializeMetadata(output), + portalArn: undefined, + trustStoreArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.portalArn !== undefined && data.portalArn !== null) { + contents.portalArn = __expectString(data.portalArn); + } + if (data.trustStoreArn !== undefined && data.trustStoreArn !== null) { + contents.trustStoreArn = __expectString(data.trustStoreArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1AssociateTrustStoreCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || 
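// Illustrative sketch, not part of the generated patch: each success deserializer above follows
// the same three steps -- gate on the HTTP status, JSON-parse the payload, and copy each modeled
// member through a type check so unexpected shapes fail loudly instead of leaking "any" into the
// output. A reduced, hypothetical version of that shape, with JSON.parse standing in for the
// SDK's parseBody/__expectObject helpers and expectString for __expectString:
interface AssociateTrustStoreResultLike {
  portalArn?: string;
  trustStoreArn?: string;
}

const expectString = (value: unknown, field: string): string | undefined => {
  if (value === undefined || value === null) {
    return undefined;
  }
  if (typeof value !== "string") {
    throw new TypeError(`Expected string value for ${field}`);
  }
  return value;
};

const deserializeAssociateTrustStoreLike = (statusCode: number, rawBody: string): AssociateTrustStoreResultLike => {
  if (statusCode !== 200 && statusCode >= 300) {
    throw new Error(`Unexpected HTTP status ${statusCode}`);
  }
  const data = JSON.parse(rawBody) as { [key: string]: unknown };
  return {
    portalArn: expectString(data.portalArn, "portalArn"),
    trustStoreArn: expectString(data.trustStoreArn, "trustStoreArn"),
  };
};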
parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1AssociateUserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1AssociateUserSettingsCommandError(output, context); + } + const contents: AssociateUserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + portalArn: undefined, + userSettingsArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.portalArn !== undefined && data.portalArn !== null) { + contents.portalArn = __expectString(data.portalArn); + } + if (data.userSettingsArn !== undefined && data.userSettingsArn !== null) { + contents.userSettingsArn = __expectString(data.userSettingsArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1AssociateUserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: 
deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateBrowserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateBrowserSettingsCommandError(output, context); + } + const contents: CreateBrowserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + browserSettingsArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.browserSettingsArn !== undefined && data.browserSettingsArn !== null) { + contents.browserSettingsArn = __expectString(data.browserSettingsArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateBrowserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.workspacesweb#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: 
parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateIdentityProviderCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateIdentityProviderCommandError(output, context); + } + const contents: CreateIdentityProviderCommandOutput = { + $metadata: deserializeMetadata(output), + identityProviderArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.identityProviderArn !== undefined && data.identityProviderArn !== null) { + contents.identityProviderArn = __expectString(data.identityProviderArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateIdentityProviderCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; 
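// Illustrative sketch, not part of the generated patch: every *CommandError deserializer above
// dispatches on the code resolved by loadRestJsonErrorCode, maps the modeled workspaces-web
// exceptions to typed shapes, and for anything unmodeled falls back to the body's code/Code and
// message/Message fields before rejecting. A reduced, hypothetical dispatcher showing only that
// control flow (the generated code additionally attaches $metadata and per-exception members):
interface ParsedErrorBody {
  code?: string;
  Code?: string;
  message?: string;
  Message?: string;
  [key: string]: unknown;
}

const toServiceError = (resolvedErrorCode: string | undefined, parsedBody: ParsedErrorBody): Error => {
  const errorCode = resolvedErrorCode ?? parsedBody.code ?? parsedBody.Code ?? "UnknownError";
  const message = parsedBody.message ?? parsedBody.Message ?? errorCode;
  switch (errorCode) {
    case "AccessDeniedException":
    case "ConflictException":
    case "InternalServerException":
    case "ResourceNotFoundException":
    case "ServiceQuotaExceededException":
    case "ThrottlingException":
    case "ValidationException":
      // Modeled exceptions keep their own name so callers can branch on error.name.
      return Object.assign(new Error(message), { name: errorCode });
    default:
      // Unmodeled errors keep whatever the service returned, plus a synthesized name/message.
      return Object.assign(new Error(message), { ...parsedBody, name: errorCode, $fault: "client" });
  }
};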
+ response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreateNetworkSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1CreateNetworkSettingsCommandError(output, context); + } + const contents: CreateNetworkSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + networkSettingsArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.networkSettingsArn !== undefined && data.networkSettingsArn !== null) { + contents.networkSettingsArn = __expectString(data.networkSettingsArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1CreateNetworkSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.workspacesweb#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1CreatePortalCommand = async 
(
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreatePortalCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CreatePortalCommandError(output, context);
+  }
+  const contents: CreatePortalCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    portalArn: undefined,
+    portalEndpoint: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.portalArn !== undefined && data.portalArn !== null) {
+    contents.portalArn = __expectString(data.portalArn);
+  }
+  if (data.portalEndpoint !== undefined && data.portalEndpoint !== null) {
+    contents.portalEndpoint = __expectString(data.portalEndpoint);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CreatePortalCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreatePortalCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.workspacesweb#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ConflictException":
+    case "com.amazonaws.workspacesweb#ConflictException":
+      response = {
+        ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.workspacesweb#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ResourceNotFoundException":
+    case "com.amazonaws.workspacesweb#ResourceNotFoundException":
+      response = {
+        ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ServiceQuotaExceededException":
+    case "com.amazonaws.workspacesweb#ServiceQuotaExceededException":
+      response = {
+        ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.workspacesweb#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.workspacesweb#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CreateTrustStoreCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateTrustStoreCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CreateTrustStoreCommandError(output, context);
+  }
+  const contents: CreateTrustStoreCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    trustStoreArn: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.trustStoreArn !== undefined && data.trustStoreArn !== null) {
+    contents.trustStoreArn = __expectString(data.trustStoreArn);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CreateTrustStoreCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateTrustStoreCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.workspacesweb#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ConflictException":
+    case "com.amazonaws.workspacesweb#ConflictException":
+      response = {
+        ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.workspacesweb#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ServiceQuotaExceededException":
+    case "com.amazonaws.workspacesweb#ServiceQuotaExceededException":
+      response = {
+        ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.workspacesweb#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.workspacesweb#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1CreateUserSettingsCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateUserSettingsCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1CreateUserSettingsCommandError(output, context);
+  }
+  const contents: CreateUserSettingsCommandOutput = {
+    $metadata: deserializeMetadata(output),
+    userSettingsArn: undefined,
+  };
+  const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body");
+  if (data.userSettingsArn !== undefined && data.userSettingsArn !== null) {
+    contents.userSettingsArn = __expectString(data.userSettingsArn);
+  }
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1CreateUserSettingsCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<CreateUserSettingsCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.workspacesweb#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ConflictException":
+    case "com.amazonaws.workspacesweb#ConflictException":
+      response = {
+        ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.workspacesweb#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ServiceQuotaExceededException":
+    case "com.amazonaws.workspacesweb#ServiceQuotaExceededException":
+      response = {
+        ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.workspacesweb#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.workspacesweb#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1DeleteBrowserSettingsCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<DeleteBrowserSettingsCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1DeleteBrowserSettingsCommandError(output, context);
+  }
+  const contents: DeleteBrowserSettingsCommandOutput = {
+    $metadata: deserializeMetadata(output),
+  };
+  await collectBody(output.body, context);
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1DeleteBrowserSettingsCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<DeleteBrowserSettingsCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.workspacesweb#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ConflictException":
+    case "com.amazonaws.workspacesweb#ConflictException":
+      response = {
+        ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "InternalServerException":
+    case "com.amazonaws.workspacesweb#InternalServerException":
+      response = {
+        ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ThrottlingException":
+    case "com.amazonaws.workspacesweb#ThrottlingException":
+      response = {
+        ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    case "ValidationException":
+    case "com.amazonaws.workspacesweb#ValidationException":
+      response = {
+        ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)),
+        name: errorCode,
+        $metadata: deserializeMetadata(output),
+      };
+      break;
+    default:
+      const parsedBody = parsedOutput.body;
+      errorCode = parsedBody.code || parsedBody.Code || errorCode;
+      response = {
+        ...parsedBody,
+        name: `${errorCode}`,
+        message: parsedBody.message || parsedBody.Message || errorCode,
+        $fault: "client",
+        $metadata: deserializeMetadata(output),
+      } as any;
+  }
+  const message = response.message || response.Message || errorCode;
+  response.message = message;
+  delete response.Message;
+  return Promise.reject(Object.assign(new Error(message), response));
+};
+
+export const deserializeAws_restJson1DeleteIdentityProviderCommand = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<DeleteIdentityProviderCommandOutput> => {
+  if (output.statusCode !== 200 && output.statusCode >= 300) {
+    return deserializeAws_restJson1DeleteIdentityProviderCommandError(output, context);
+  }
+  const contents: DeleteIdentityProviderCommandOutput = {
+    $metadata: deserializeMetadata(output),
+  };
+  await collectBody(output.body, context);
+  return Promise.resolve(contents);
+};
+
+const deserializeAws_restJson1DeleteIdentityProviderCommandError = async (
+  output: __HttpResponse,
+  context: __SerdeContext
+): Promise<DeleteIdentityProviderCommandOutput> => {
+  const parsedOutput: any = {
+    ...output,
+    body: await parseBody(output.body, context),
+  };
+  let response: __SmithyException & __MetadataBearer & { [key: string]: any };
+  let errorCode = "UnknownError";
+  errorCode = loadRestJsonErrorCode(output, parsedOutput.body);
+  switch (errorCode) {
+    case "AccessDeniedException":
+    case "com.amazonaws.workspacesweb#AccessDeniedException":
+      response = {
+        ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput,
context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteNetworkSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteNetworkSettingsCommandError(output, context); + } + const contents: DeleteNetworkSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteNetworkSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await 
deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeletePortalCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeletePortalCommandError(output, context); + } + const contents: DeletePortalCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeletePortalCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; 
+ response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteTrustStoreCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteTrustStoreCommandError(output, context); + } + const contents: DeleteTrustStoreCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteTrustStoreCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DeleteUserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DeleteUserSettingsCommandError(output, context); + } + const contents: DeleteUserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DeleteUserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + 
...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ConflictException": + case "com.amazonaws.workspacesweb#ConflictException": + response = { + ...(await deserializeAws_restJson1ConflictExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DisassociateBrowserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisassociateBrowserSettingsCommandError(output, context); + } + const contents: DisassociateBrowserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisassociateBrowserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + 
name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DisassociateNetworkSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisassociateNetworkSettingsCommandError(output, context); + } + const contents: DisassociateNetworkSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisassociateNetworkSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + 
response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1DisassociateTrustStoreCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisassociateTrustStoreCommandError(output, context); + } + const contents: DisassociateTrustStoreCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisassociateTrustStoreCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const 
deserializeAws_restJson1DisassociateUserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1DisassociateUserSettingsCommandError(output, context); + } + const contents: DisassociateUserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1DisassociateUserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetBrowserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetBrowserSettingsCommandError(output, context); + } + const contents: GetBrowserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + browserSettings: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.browserSettings !== undefined && data.browserSettings !== null) { + contents.browserSettings = deserializeAws_restJson1BrowserSettings(data.browserSettings, context); + } + return 
Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetBrowserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetIdentityProviderCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetIdentityProviderCommandError(output, context); + } + const contents: GetIdentityProviderCommandOutput = { + $metadata: deserializeMetadata(output), + identityProvider: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.identityProvider !== undefined && data.identityProvider !== null) { + contents.identityProvider = deserializeAws_restJson1IdentityProvider(data.identityProvider, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetIdentityProviderCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch 
(errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetNetworkSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetNetworkSettingsCommandError(output, context); + } + const contents: GetNetworkSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + networkSettings: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.networkSettings !== undefined && data.networkSettings !== null) { + contents.networkSettings = deserializeAws_restJson1NetworkSettings(data.networkSettings, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetNetworkSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await 
deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetPortalCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetPortalCommandError(output, context); + } + const contents: GetPortalCommandOutput = { + $metadata: deserializeMetadata(output), + portal: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.portal !== undefined && data.portal !== null) { + contents.portal = deserializeAws_restJson1Portal(data.portal, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetPortalCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await 
deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetPortalServiceProviderMetadataCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetPortalServiceProviderMetadataCommandError(output, context); + } + const contents: GetPortalServiceProviderMetadataCommandOutput = { + $metadata: deserializeMetadata(output), + portalArn: undefined, + serviceProviderSamlMetadata: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.portalArn !== undefined && data.portalArn !== null) { + contents.portalArn = __expectString(data.portalArn); + } + if (data.serviceProviderSamlMetadata !== undefined && data.serviceProviderSamlMetadata !== null) { + contents.serviceProviderSamlMetadata = __expectString(data.serviceProviderSamlMetadata); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetPortalServiceProviderMetadataCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case 
"com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetTrustStoreCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetTrustStoreCommandError(output, context); + } + const contents: GetTrustStoreCommandOutput = { + $metadata: deserializeMetadata(output), + trustStore: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.trustStore !== undefined && data.trustStore !== null) { + contents.trustStore = deserializeAws_restJson1TrustStore(data.trustStore, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetTrustStoreCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: 
deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1GetTrustStoreCertificateCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetTrustStoreCertificateCommandError(output, context); + } + const contents: GetTrustStoreCertificateCommandOutput = { + $metadata: deserializeMetadata(output), + certificate: undefined, + trustStoreArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.certificate !== undefined && data.certificate !== null) { + contents.certificate = deserializeAws_restJson1Certificate(data.certificate, context); + } + if (data.trustStoreArn !== undefined && data.trustStoreArn !== null) { + contents.trustStoreArn = __expectString(data.trustStoreArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetTrustStoreCertificateCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const 
deserializeAws_restJson1GetUserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1GetUserSettingsCommandError(output, context); + } + const contents: GetUserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + userSettings: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.userSettings !== undefined && data.userSettings !== null) { + contents.userSettings = deserializeAws_restJson1UserSettings(data.userSettings, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1GetUserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListBrowserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListBrowserSettingsCommandError(output, context); + } + const contents: ListBrowserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + browserSettings: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = 
__expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.browserSettings !== undefined && data.browserSettings !== null) { + contents.browserSettings = deserializeAws_restJson1BrowserSettingsList(data.browserSettings, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListBrowserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListIdentityProvidersCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListIdentityProvidersCommandError(output, context); + } + const contents: ListIdentityProvidersCommandOutput = { + $metadata: deserializeMetadata(output), + identityProviders: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.identityProviders !== undefined && data.identityProviders !== null) { + contents.identityProviders = deserializeAws_restJson1IdentityProviderList(data.identityProviders, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListIdentityProvidersCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + 
const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListNetworkSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListNetworkSettingsCommandError(output, context); + } + const contents: ListNetworkSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + networkSettings: undefined, + nextToken: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.networkSettings !== undefined && data.networkSettings !== null) { + contents.networkSettings = deserializeAws_restJson1NetworkSettingsList(data.networkSettings, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListNetworkSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case 
"InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListPortalsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListPortalsCommandError(output, context); + } + const contents: ListPortalsCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + portals: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.portals !== undefined && data.portals !== null) { + contents.portals = deserializeAws_restJson1PortalList(data.portals, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListPortalsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await 
deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTagsForResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTagsForResourceCommandError(output, context); + } + const contents: ListTagsForResourceCommandOutput = { + $metadata: deserializeMetadata(output), + tags: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.tags !== undefined && data.tags !== null) { + contents.tags = deserializeAws_restJson1TagList(data.tags, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTagsForResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || 
errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListTrustStoreCertificatesCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTrustStoreCertificatesCommandError(output, context); + } + const contents: ListTrustStoreCertificatesCommandOutput = { + $metadata: deserializeMetadata(output), + certificateList: undefined, + nextToken: undefined, + trustStoreArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.certificateList !== undefined && data.certificateList !== null) { + contents.certificateList = deserializeAws_restJson1CertificateSummaryList(data.certificateList, context); + } + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.trustStoreArn !== undefined && data.trustStoreArn !== null) { + contents.trustStoreArn = __expectString(data.trustStoreArn); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTrustStoreCertificatesCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new 
Error(message), response)); +}; + +export const deserializeAws_restJson1ListTrustStoresCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListTrustStoresCommandError(output, context); + } + const contents: ListTrustStoresCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + trustStores: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.trustStores !== undefined && data.trustStores !== null) { + contents.trustStores = deserializeAws_restJson1TrustStoreSummaryList(data.trustStores, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListTrustStoresCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1ListUserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1ListUserSettingsCommandError(output, context); + } + const contents: ListUserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + nextToken: undefined, + userSettings: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.nextToken !== undefined && 
data.nextToken !== null) { + contents.nextToken = __expectString(data.nextToken); + } + if (data.userSettings !== undefined && data.userSettings !== null) { + contents.userSettings = deserializeAws_restJson1UserSettingsList(data.userSettings, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1ListUserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1TagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1TagResourceCommandError(output, context); + } + const contents: TagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1TagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case 
"com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "TooManyTagsException": + case "com.amazonaws.workspacesweb#TooManyTagsException": + response = { + ...(await deserializeAws_restJson1TooManyTagsExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UntagResourceCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UntagResourceCommandError(output, context); + } + const contents: UntagResourceCommandOutput = { + $metadata: deserializeMetadata(output), + }; + await collectBody(output.body, context); + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UntagResourceCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: 
deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateBrowserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateBrowserSettingsCommandError(output, context); + } + const contents: UpdateBrowserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + browserSettings: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.browserSettings !== undefined && data.browserSettings !== null) { + contents.browserSettings = deserializeAws_restJson1BrowserSettings(data.browserSettings, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateBrowserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await 
deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateIdentityProviderCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateIdentityProviderCommandError(output, context); + } + const contents: UpdateIdentityProviderCommandOutput = { + $metadata: deserializeMetadata(output), + identityProvider: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.identityProvider !== undefined && data.identityProvider !== null) { + contents.identityProvider = deserializeAws_restJson1IdentityProvider(data.identityProvider, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateIdentityProviderCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: 
deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateNetworkSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateNetworkSettingsCommandError(output, context); + } + const contents: UpdateNetworkSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + networkSettings: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.networkSettings !== undefined && data.networkSettings !== null) { + contents.networkSettings = deserializeAws_restJson1NetworkSettings(data.networkSettings, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateNetworkSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdatePortalCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + 
return deserializeAws_restJson1UpdatePortalCommandError(output, context); + } + const contents: UpdatePortalCommandOutput = { + $metadata: deserializeMetadata(output), + portal: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.portal !== undefined && data.portal !== null) { + contents.portal = deserializeAws_restJson1Portal(data.portal, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdatePortalCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateTrustStoreCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateTrustStoreCommandError(output, context); + } + const contents: UpdateTrustStoreCommandOutput = { + $metadata: deserializeMetadata(output), + trustStoreArn: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.trustStoreArn !== undefined && data.trustStoreArn !== null) { + contents.trustStoreArn = __expectString(data.trustStoreArn); + } + return Promise.resolve(contents); +}; + +const 
deserializeAws_restJson1UpdateTrustStoreCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ServiceQuotaExceededException": + case "com.amazonaws.workspacesweb#ServiceQuotaExceededException": + response = { + ...(await deserializeAws_restJson1ServiceQuotaExceededExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +export const deserializeAws_restJson1UpdateUserSettingsCommand = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + if (output.statusCode !== 200 && output.statusCode >= 300) { + return deserializeAws_restJson1UpdateUserSettingsCommandError(output, context); + } + const contents: UpdateUserSettingsCommandOutput = { + $metadata: deserializeMetadata(output), + userSettings: undefined, + }; + const data: { [key: string]: any } = __expectNonNull(__expectObject(await parseBody(output.body, context)), "body"); + if (data.userSettings !== undefined && data.userSettings !== null) { + contents.userSettings = deserializeAws_restJson1UserSettings(data.userSettings, context); + } + return Promise.resolve(contents); +}; + +const deserializeAws_restJson1UpdateUserSettingsCommandError = async ( + output: __HttpResponse, + context: __SerdeContext +): Promise => { + const parsedOutput: any = { + ...output, + body: 
await parseBody(output.body, context), + }; + let response: __SmithyException & __MetadataBearer & { [key: string]: any }; + let errorCode = "UnknownError"; + errorCode = loadRestJsonErrorCode(output, parsedOutput.body); + switch (errorCode) { + case "AccessDeniedException": + case "com.amazonaws.workspacesweb#AccessDeniedException": + response = { + ...(await deserializeAws_restJson1AccessDeniedExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "InternalServerException": + case "com.amazonaws.workspacesweb#InternalServerException": + response = { + ...(await deserializeAws_restJson1InternalServerExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ResourceNotFoundException": + case "com.amazonaws.workspacesweb#ResourceNotFoundException": + response = { + ...(await deserializeAws_restJson1ResourceNotFoundExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ThrottlingException": + case "com.amazonaws.workspacesweb#ThrottlingException": + response = { + ...(await deserializeAws_restJson1ThrottlingExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + case "ValidationException": + case "com.amazonaws.workspacesweb#ValidationException": + response = { + ...(await deserializeAws_restJson1ValidationExceptionResponse(parsedOutput, context)), + name: errorCode, + $metadata: deserializeMetadata(output), + }; + break; + default: + const parsedBody = parsedOutput.body; + errorCode = parsedBody.code || parsedBody.Code || errorCode; + response = { + ...parsedBody, + name: `${errorCode}`, + message: parsedBody.message || parsedBody.Message || errorCode, + $fault: "client", + $metadata: deserializeMetadata(output), + } as any; + } + const message = response.message || response.Message || errorCode; + response.message = message; + delete response.Message; + return Promise.reject(Object.assign(new Error(message), response)); +}; + +const deserializeAws_restJson1AccessDeniedExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: AccessDeniedException = { + name: "AccessDeniedException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ConflictExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ConflictException = { + name: "ConflictException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + resourceId: undefined, + resourceType: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.resourceId !== undefined && data.resourceId !== null) { + contents.resourceId = __expectString(data.resourceId); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + return contents; +}; + +const deserializeAws_restJson1InternalServerExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const 
contents: InternalServerException = { + name: "InternalServerException", + $fault: "server", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + retryAfterSeconds: undefined, + }; + if (parsedOutput.headers["retry-after"] !== undefined) { + contents.retryAfterSeconds = __strictParseInt32(parsedOutput.headers["retry-after"]); + } + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + return contents; +}; + +const deserializeAws_restJson1ResourceNotFoundExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ResourceNotFoundException = { + name: "ResourceNotFoundException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + resourceId: undefined, + resourceType: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.resourceId !== undefined && data.resourceId !== null) { + contents.resourceId = __expectString(data.resourceId); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + return contents; +}; + +const deserializeAws_restJson1ServiceQuotaExceededExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ServiceQuotaExceededException = { + name: "ServiceQuotaExceededException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + quotaCode: undefined, + resourceId: undefined, + resourceType: undefined, + serviceCode: undefined, + }; + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.quotaCode !== undefined && data.quotaCode !== null) { + contents.quotaCode = __expectString(data.quotaCode); + } + if (data.resourceId !== undefined && data.resourceId !== null) { + contents.resourceId = __expectString(data.resourceId); + } + if (data.resourceType !== undefined && data.resourceType !== null) { + contents.resourceType = __expectString(data.resourceType); + } + if (data.serviceCode !== undefined && data.serviceCode !== null) { + contents.serviceCode = __expectString(data.serviceCode); + } + return contents; +}; + +const deserializeAws_restJson1ThrottlingExceptionResponse = async ( + parsedOutput: any, + context: __SerdeContext +): Promise => { + const contents: ThrottlingException = { + name: "ThrottlingException", + $fault: "client", + $metadata: deserializeMetadata(parsedOutput), + message: undefined, + quotaCode: undefined, + retryAfterSeconds: undefined, + serviceCode: undefined, + }; + if (parsedOutput.headers["retry-after"] !== undefined) { + contents.retryAfterSeconds = __strictParseInt32(parsedOutput.headers["retry-after"]); + } + const data: any = parsedOutput.body; + if (data.message !== undefined && data.message !== null) { + contents.message = __expectString(data.message); + } + if (data.quotaCode !== undefined && data.quotaCode !== null) { + contents.quotaCode = __expectString(data.quotaCode); + } + if (data.serviceCode !== undefined && data.serviceCode !== null) { + contents.serviceCode = __expectString(data.serviceCode); + } + return contents; +}; + +const deserializeAws_restJson1TooManyTagsExceptionResponse = async ( + parsedOutput: any, + context: 
__SerdeContext
+): Promise<TooManyTagsException> => {
+  const contents: TooManyTagsException = {
+    name: "TooManyTagsException",
+    $fault: "client",
+    $metadata: deserializeMetadata(parsedOutput),
+    message: undefined,
+    resourceName: undefined,
+  };
+  const data: any = parsedOutput.body;
+  if (data.message !== undefined && data.message !== null) {
+    contents.message = __expectString(data.message);
+  }
+  if (data.resourceName !== undefined && data.resourceName !== null) {
+    contents.resourceName = __expectString(data.resourceName);
+  }
+  return contents;
+};
+
+const deserializeAws_restJson1ValidationExceptionResponse = async (
+  parsedOutput: any,
+  context: __SerdeContext
+): Promise<ValidationException> => {
+  const contents: ValidationException = {
+    name: "ValidationException",
+    $fault: "client",
+    $metadata: deserializeMetadata(parsedOutput),
+    fieldList: undefined,
+    message: undefined,
+    reason: undefined,
+  };
+  const data: any = parsedOutput.body;
+  if (data.fieldList !== undefined && data.fieldList !== null) {
+    contents.fieldList = deserializeAws_restJson1ValidationExceptionFieldList(data.fieldList, context);
+  }
+  if (data.message !== undefined && data.message !== null) {
+    contents.message = __expectString(data.message);
+  }
+  if (data.reason !== undefined && data.reason !== null) {
+    contents.reason = __expectString(data.reason);
+  }
+  return contents;
+};
+
+const serializeAws_restJson1CertificateList = (input: Uint8Array[], context: __SerdeContext): any => {
+  return input
+    .filter((e: any) => e != null)
+    .map((entry) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return context.base64Encoder(entry);
+    });
+};
+
+const serializeAws_restJson1CertificateThumbprintList = (input: string[], context: __SerdeContext): any => {
+  return input
+    .filter((e: any) => e != null)
+    .map((entry) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return entry;
+    });
+};
+
+const serializeAws_restJson1EncryptionContextMap = (input: { [key: string]: string }, context: __SerdeContext): any => {
+  return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => {
+    if (value === null) {
+      return acc;
+    }
+    return {
+      ...acc,
+      [key]: value,
+    };
+  }, {});
+};
+
+const serializeAws_restJson1IdentityProviderDetails = (
+  input: { [key: string]: string },
+  context: __SerdeContext
+): any => {
+  return Object.entries(input).reduce((acc: { [key: string]: any }, [key, value]: [string, any]) => {
+    if (value === null) {
+      return acc;
+    }
+    return {
+      ...acc,
+      [key]: value,
+    };
+  }, {});
+};
+
+const serializeAws_restJson1SecurityGroupIdList = (input: string[], context: __SerdeContext): any => {
+  return input
+    .filter((e: any) => e != null)
+    .map((entry) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return entry;
+    });
+};
+
+const serializeAws_restJson1SubnetIdList = (input: string[], context: __SerdeContext): any => {
+  return input
+    .filter((e: any) => e != null)
+    .map((entry) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return entry;
+    });
+};
+
+const serializeAws_restJson1Tag = (input: Tag, context: __SerdeContext): any => {
+  return {
+    ...(input.Key !== undefined && input.Key !== null && { Key: input.Key }),
+    ...(input.Value !== undefined && input.Value !== null && { Value: input.Value }),
+  };
+};
+
+const serializeAws_restJson1TagList = (input: Tag[], context: __SerdeContext): any => {
+  return input
+    .filter((e: any) => e != null)
+    .map((entry) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return serializeAws_restJson1Tag(entry, context);
+    });
+};
+
+const deserializeAws_restJson1ArnList = (output: any, context: __SerdeContext): string[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return __expectString(entry) as any;
+    });
+};
+
+const deserializeAws_restJson1BrowserSettings = (output: any, context: __SerdeContext): BrowserSettings => {
+  return {
+    associatedPortalArns:
+      output.associatedPortalArns !== undefined && output.associatedPortalArns !== null
+        ? deserializeAws_restJson1ArnList(output.associatedPortalArns, context)
+        : undefined,
+    browserPolicy: __expectString(output.browserPolicy),
+    browserSettingsArn: __expectString(output.browserSettingsArn),
+  } as any;
+};
+
+const deserializeAws_restJson1BrowserSettingsList = (
+  output: any,
+  context: __SerdeContext
+): BrowserSettingsSummary[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return deserializeAws_restJson1BrowserSettingsSummary(entry, context);
+    });
+};
+
+const deserializeAws_restJson1BrowserSettingsSummary = (
+  output: any,
+  context: __SerdeContext
+): BrowserSettingsSummary => {
+  return {
+    browserSettingsArn: __expectString(output.browserSettingsArn),
+  } as any;
+};
+
+const deserializeAws_restJson1Certificate = (output: any, context: __SerdeContext): Certificate => {
+  return {
+    body: output.body !== undefined && output.body !== null ? context.base64Decoder(output.body) : undefined,
+    issuer: __expectString(output.issuer),
+    notValidAfter:
+      output.notValidAfter !== undefined && output.notValidAfter !== null
+        ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.notValidAfter)))
+        : undefined,
+    notValidBefore:
+      output.notValidBefore !== undefined && output.notValidBefore !== null
+        ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.notValidBefore)))
+        : undefined,
+    subject: __expectString(output.subject),
+    thumbprint: __expectString(output.thumbprint),
+  } as any;
+};
+
+const deserializeAws_restJson1CertificateSummary = (output: any, context: __SerdeContext): CertificateSummary => {
+  return {
+    issuer: __expectString(output.issuer),
+    notValidAfter:
+      output.notValidAfter !== undefined && output.notValidAfter !== null
+        ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.notValidAfter)))
+        : undefined,
+    notValidBefore:
+      output.notValidBefore !== undefined && output.notValidBefore !== null
+        ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.notValidBefore)))
+        : undefined,
+    subject: __expectString(output.subject),
+    thumbprint: __expectString(output.thumbprint),
+  } as any;
+};
+
+const deserializeAws_restJson1CertificateSummaryList = (output: any, context: __SerdeContext): CertificateSummary[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return deserializeAws_restJson1CertificateSummary(entry, context);
+    });
+};
+
+const deserializeAws_restJson1IdentityProvider = (output: any, context: __SerdeContext): IdentityProvider => {
+  return {
+    identityProviderArn: __expectString(output.identityProviderArn),
+    identityProviderDetails:
+      output.identityProviderDetails !== undefined && output.identityProviderDetails !== null
+        ? deserializeAws_restJson1IdentityProviderDetails(output.identityProviderDetails, context)
+        : undefined,
+    identityProviderName: __expectString(output.identityProviderName),
+    identityProviderType: __expectString(output.identityProviderType),
+  } as any;
+};
+
+const deserializeAws_restJson1IdentityProviderDetails = (
+  output: any,
+  context: __SerdeContext
+): { [key: string]: string } => {
+  return Object.entries(output).reduce((acc: { [key: string]: string }, [key, value]: [string, any]) => {
+    if (value === null) {
+      return acc;
+    }
+    return {
+      ...acc,
+      [key]: __expectString(value) as any,
+    };
+  }, {});
+};
+
+const deserializeAws_restJson1IdentityProviderList = (
+  output: any,
+  context: __SerdeContext
+): IdentityProviderSummary[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return deserializeAws_restJson1IdentityProviderSummary(entry, context);
+    });
+};
+
+const deserializeAws_restJson1IdentityProviderSummary = (
+  output: any,
+  context: __SerdeContext
+): IdentityProviderSummary => {
+  return {
+    identityProviderArn: __expectString(output.identityProviderArn),
+    identityProviderName: __expectString(output.identityProviderName),
+    identityProviderType: __expectString(output.identityProviderType),
+  } as any;
+};
+
+const deserializeAws_restJson1NetworkSettings = (output: any, context: __SerdeContext): NetworkSettings => {
+  return {
+    associatedPortalArns:
+      output.associatedPortalArns !== undefined && output.associatedPortalArns !== null
+        ? deserializeAws_restJson1ArnList(output.associatedPortalArns, context)
+        : undefined,
+    networkSettingsArn: __expectString(output.networkSettingsArn),
+    securityGroupIds:
+      output.securityGroupIds !== undefined && output.securityGroupIds !== null
+        ? deserializeAws_restJson1SecurityGroupIdList(output.securityGroupIds, context)
+        : undefined,
+    subnetIds:
+      output.subnetIds !== undefined && output.subnetIds !== null
+        ? deserializeAws_restJson1SubnetIdList(output.subnetIds, context)
+        : undefined,
+    vpcId: __expectString(output.vpcId),
+  } as any;
+};
+
+const deserializeAws_restJson1NetworkSettingsList = (
+  output: any,
+  context: __SerdeContext
+): NetworkSettingsSummary[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return deserializeAws_restJson1NetworkSettingsSummary(entry, context);
+    });
+};
+
+const deserializeAws_restJson1NetworkSettingsSummary = (
+  output: any,
+  context: __SerdeContext
+): NetworkSettingsSummary => {
+  return {
+    networkSettingsArn: __expectString(output.networkSettingsArn),
+    vpcId: __expectString(output.vpcId),
+  } as any;
+};
+
+const deserializeAws_restJson1Portal = (output: any, context: __SerdeContext): Portal => {
+  return {
+    browserSettingsArn: __expectString(output.browserSettingsArn),
+    browserType: __expectString(output.browserType),
+    creationDate:
+      output.creationDate !== undefined && output.creationDate !== null
+        ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.creationDate)))
+        : undefined,
+    displayName: __expectString(output.displayName),
+    networkSettingsArn: __expectString(output.networkSettingsArn),
+    portalArn: __expectString(output.portalArn),
+    portalEndpoint: __expectString(output.portalEndpoint),
+    portalStatus: __expectString(output.portalStatus),
+    rendererType: __expectString(output.rendererType),
+    statusReason: __expectString(output.statusReason),
+    trustStoreArn: __expectString(output.trustStoreArn),
+    userSettingsArn: __expectString(output.userSettingsArn),
+  } as any;
+};
+
+const deserializeAws_restJson1PortalList = (output: any, context: __SerdeContext): PortalSummary[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return deserializeAws_restJson1PortalSummary(entry, context);
+    });
+};
+
+const deserializeAws_restJson1PortalSummary = (output: any, context: __SerdeContext): PortalSummary => {
+  return {
+    browserSettingsArn: __expectString(output.browserSettingsArn),
+    browserType: __expectString(output.browserType),
+    creationDate:
+      output.creationDate !== undefined && output.creationDate !== null
+        ? __expectNonNull(__parseEpochTimestamp(__expectNumber(output.creationDate)))
+        : undefined,
+    displayName: __expectString(output.displayName),
+    networkSettingsArn: __expectString(output.networkSettingsArn),
+    portalArn: __expectString(output.portalArn),
+    portalEndpoint: __expectString(output.portalEndpoint),
+    portalStatus: __expectString(output.portalStatus),
+    rendererType: __expectString(output.rendererType),
+    trustStoreArn: __expectString(output.trustStoreArn),
+    userSettingsArn: __expectString(output.userSettingsArn),
+  } as any;
+};
+
+const deserializeAws_restJson1SecurityGroupIdList = (output: any, context: __SerdeContext): string[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return __expectString(entry) as any;
+    });
+};
+
+const deserializeAws_restJson1SubnetIdList = (output: any, context: __SerdeContext): string[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return __expectString(entry) as any;
+    });
+};
+
+const deserializeAws_restJson1Tag = (output: any, context: __SerdeContext): Tag => {
+  return {
+    Key: __expectString(output.Key),
+    Value: __expectString(output.Value),
+  } as any;
+};
+
+const deserializeAws_restJson1TagList = (output: any, context: __SerdeContext): Tag[] => {
+  return (output || [])
+    .filter((e: any) => e != null)
+    .map((entry: any) => {
+      if (entry === null) {
+        return null as any;
+      }
+      return deserializeAws_restJson1Tag(entry, context);
+    });
+};
+
+const deserializeAws_restJson1TrustStore = (output: any, context: __SerdeContext): TrustStore => {
+  return {
+    associatedPortalArns:
+      output.associatedPortalArns !== undefined && output.associatedPortalArns !== null
+        ?
deserializeAws_restJson1ArnList(output.associatedPortalArns, context) + : undefined, + trustStoreArn: __expectString(output.trustStoreArn), + } as any; +}; + +const deserializeAws_restJson1TrustStoreSummary = (output: any, context: __SerdeContext): TrustStoreSummary => { + return { + trustStoreArn: __expectString(output.trustStoreArn), + } as any; +}; + +const deserializeAws_restJson1TrustStoreSummaryList = (output: any, context: __SerdeContext): TrustStoreSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1TrustStoreSummary(entry, context); + }); +}; + +const deserializeAws_restJson1UserSettings = (output: any, context: __SerdeContext): UserSettings => { + return { + associatedPortalArns: + output.associatedPortalArns !== undefined && output.associatedPortalArns !== null + ? deserializeAws_restJson1ArnList(output.associatedPortalArns, context) + : undefined, + copyAllowed: __expectString(output.copyAllowed), + downloadAllowed: __expectString(output.downloadAllowed), + pasteAllowed: __expectString(output.pasteAllowed), + printAllowed: __expectString(output.printAllowed), + uploadAllowed: __expectString(output.uploadAllowed), + userSettingsArn: __expectString(output.userSettingsArn), + } as any; +}; + +const deserializeAws_restJson1UserSettingsList = (output: any, context: __SerdeContext): UserSettingsSummary[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1UserSettingsSummary(entry, context); + }); +}; + +const deserializeAws_restJson1UserSettingsSummary = (output: any, context: __SerdeContext): UserSettingsSummary => { + return { + copyAllowed: __expectString(output.copyAllowed), + downloadAllowed: __expectString(output.downloadAllowed), + pasteAllowed: __expectString(output.pasteAllowed), + printAllowed: __expectString(output.printAllowed), + uploadAllowed: __expectString(output.uploadAllowed), + userSettingsArn: __expectString(output.userSettingsArn), + } as any; +}; + +const deserializeAws_restJson1ValidationExceptionField = ( + output: any, + context: __SerdeContext +): ValidationExceptionField => { + return { + message: __expectString(output.message), + name: __expectString(output.name), + } as any; +}; + +const deserializeAws_restJson1ValidationExceptionFieldList = ( + output: any, + context: __SerdeContext +): ValidationExceptionField[] => { + return (output || []) + .filter((e: any) => e != null) + .map((entry: any) => { + if (entry === null) { + return null as any; + } + return deserializeAws_restJson1ValidationExceptionField(entry, context); + }); +}; + +const deserializeMetadata = (output: __HttpResponse): __ResponseMetadata => ({ + httpStatusCode: output.statusCode, + requestId: output.headers["x-amzn-requestid"] ?? output.headers["x-amzn-request-id"], + extendedRequestId: output.headers["x-amz-id-2"], + cfId: output.headers["x-amz-cf-id"], +}); + +// Collect low-level response body stream to Uint8Array. +const collectBody = (streamBody: any = new Uint8Array(), context: __SerdeContext): Promise => { + if (streamBody instanceof Uint8Array) { + return Promise.resolve(streamBody); + } + return context.streamCollector(streamBody) || Promise.resolve(new Uint8Array()); +}; + +// Encode Uint8Array data into string with utf-8. 
+const collectBodyString = (streamBody: any, context: __SerdeContext): Promise => + collectBody(streamBody, context).then((body) => context.utf8Encoder(body)); + +const isSerializableHeaderValue = (value: any): boolean => + value !== undefined && + value !== null && + value !== "" && + (!Object.getOwnPropertyNames(value).includes("length") || value.length != 0) && + (!Object.getOwnPropertyNames(value).includes("size") || value.size != 0); + +const parseBody = (streamBody: any, context: __SerdeContext): any => + collectBodyString(streamBody, context).then((encoded) => { + if (encoded.length) { + return JSON.parse(encoded); + } + return {}; + }); + +/** + * Load an error code for the aws.rest-json-1.1 protocol. + */ +const loadRestJsonErrorCode = (output: __HttpResponse, data: any): string => { + const findKey = (object: any, key: string) => Object.keys(object).find((k) => k.toLowerCase() === key.toLowerCase()); + + const sanitizeErrorCode = (rawValue: string): string => { + let cleanValue = rawValue; + if (cleanValue.indexOf(":") >= 0) { + cleanValue = cleanValue.split(":")[0]; + } + if (cleanValue.indexOf("#") >= 0) { + cleanValue = cleanValue.split("#")[1]; + } + return cleanValue; + }; + + const headerKey = findKey(output.headers, "x-amzn-errortype"); + if (headerKey !== undefined) { + return sanitizeErrorCode(output.headers[headerKey]); + } + + if (data.code !== undefined) { + return sanitizeErrorCode(data.code); + } + + if (data["__type"] !== undefined) { + return sanitizeErrorCode(data["__type"]); + } + + return ""; +}; diff --git a/clients/client-workspaces-web/src/runtimeConfig.browser.ts b/clients/client-workspaces-web/src/runtimeConfig.browser.ts new file mode 100644 index 000000000000..cf6e331df1c3 --- /dev/null +++ b/clients/client-workspaces-web/src/runtimeConfig.browser.ts @@ -0,0 +1,44 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { Sha256 } from "@aws-crypto/sha256-browser"; +import { DEFAULT_USE_DUALSTACK_ENDPOINT, DEFAULT_USE_FIPS_ENDPOINT } from "@aws-sdk/config-resolver"; +import { FetchHttpHandler, streamCollector } from "@aws-sdk/fetch-http-handler"; +import { invalidProvider } from "@aws-sdk/invalid-dependency"; +import { DEFAULT_MAX_ATTEMPTS, DEFAULT_RETRY_MODE } from "@aws-sdk/middleware-retry"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-browser"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-browser"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-browser"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-browser"; +import { WorkSpacesWebClientConfig } from "./WorkSpacesWebClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: WorkSpacesWebClientConfig) => { + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "browser", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? ((_: unknown) => () => Promise.reject(new Error("Credential is missing"))), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? 
DEFAULT_MAX_ATTEMPTS, + region: config?.region ?? invalidProvider("Region is missing"), + requestHandler: config?.requestHandler ?? new FetchHttpHandler(), + retryMode: config?.retryMode ?? (() => Promise.resolve(DEFAULT_RETRY_MODE)), + sha256: config?.sha256 ?? Sha256, + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? (() => Promise.resolve(DEFAULT_USE_DUALSTACK_ENDPOINT)), + useFipsEndpoint: config?.useFipsEndpoint ?? (() => Promise.resolve(DEFAULT_USE_FIPS_ENDPOINT)), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? toUtf8, + }; +}; diff --git a/clients/client-workspaces-web/src/runtimeConfig.native.ts b/clients/client-workspaces-web/src/runtimeConfig.native.ts new file mode 100644 index 000000000000..b8cf62c9050e --- /dev/null +++ b/clients/client-workspaces-web/src/runtimeConfig.native.ts @@ -0,0 +1,17 @@ +import { Sha256 } from "@aws-crypto/sha256-js"; + +import { getRuntimeConfig as getBrowserRuntimeConfig } from "./runtimeConfig.browser"; +import { WorkSpacesWebClientConfig } from "./WorkSpacesWebClient"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: WorkSpacesWebClientConfig) => { + const browserDefaults = getBrowserRuntimeConfig(config); + return { + ...browserDefaults, + ...config, + runtime: "react-native", + sha256: config?.sha256 ?? Sha256, + }; +}; diff --git a/clients/client-workspaces-web/src/runtimeConfig.shared.ts b/clients/client-workspaces-web/src/runtimeConfig.shared.ts new file mode 100644 index 000000000000..0c4997d044b3 --- /dev/null +++ b/clients/client-workspaces-web/src/runtimeConfig.shared.ts @@ -0,0 +1,17 @@ +import { Logger as __Logger } from "@aws-sdk/types"; +import { parseUrl } from "@aws-sdk/url-parser"; + +import { defaultRegionInfoProvider } from "./endpoints"; +import { WorkSpacesWebClientConfig } from "./WorkSpacesWebClient"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: WorkSpacesWebClientConfig) => ({ + apiVersion: "2020-07-08", + disableHostPrefix: config?.disableHostPrefix ?? false, + logger: config?.logger ?? ({} as __Logger), + regionInfoProvider: config?.regionInfoProvider ?? defaultRegionInfoProvider, + serviceId: config?.serviceId ?? "WorkSpaces Web", + urlParser: config?.urlParser ?? 
parseUrl, +}); diff --git a/clients/client-workspaces-web/src/runtimeConfig.ts b/clients/client-workspaces-web/src/runtimeConfig.ts new file mode 100644 index 000000000000..3e69902ba716 --- /dev/null +++ b/clients/client-workspaces-web/src/runtimeConfig.ts @@ -0,0 +1,53 @@ +// @ts-ignore: package.json will be imported from dist folders +import packageInfo from "../package.json"; // eslint-disable-line + +import { decorateDefaultCredentialProvider } from "@aws-sdk/client-sts"; +import { + NODE_REGION_CONFIG_FILE_OPTIONS, + NODE_REGION_CONFIG_OPTIONS, + NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS, + NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS, +} from "@aws-sdk/config-resolver"; +import { defaultProvider as credentialDefaultProvider } from "@aws-sdk/credential-provider-node"; +import { Hash } from "@aws-sdk/hash-node"; +import { NODE_MAX_ATTEMPT_CONFIG_OPTIONS, NODE_RETRY_MODE_CONFIG_OPTIONS } from "@aws-sdk/middleware-retry"; +import { loadConfig as loadNodeConfig } from "@aws-sdk/node-config-provider"; +import { NodeHttpHandler, streamCollector } from "@aws-sdk/node-http-handler"; +import { fromBase64, toBase64 } from "@aws-sdk/util-base64-node"; +import { calculateBodyLength } from "@aws-sdk/util-body-length-node"; +import { defaultUserAgent } from "@aws-sdk/util-user-agent-node"; +import { fromUtf8, toUtf8 } from "@aws-sdk/util-utf8-node"; +import { WorkSpacesWebClientConfig } from "./WorkSpacesWebClient"; +import { getRuntimeConfig as getSharedRuntimeConfig } from "./runtimeConfig.shared"; +import { emitWarningIfUnsupportedVersion } from "@aws-sdk/smithy-client"; + +/** + * @internal + */ +export const getRuntimeConfig = (config: WorkSpacesWebClientConfig) => { + emitWarningIfUnsupportedVersion(process.version); + const clientSharedValues = getSharedRuntimeConfig(config); + return { + ...clientSharedValues, + ...config, + runtime: "node", + base64Decoder: config?.base64Decoder ?? fromBase64, + base64Encoder: config?.base64Encoder ?? toBase64, + bodyLengthChecker: config?.bodyLengthChecker ?? calculateBodyLength, + credentialDefaultProvider: + config?.credentialDefaultProvider ?? decorateDefaultCredentialProvider(credentialDefaultProvider), + defaultUserAgentProvider: + config?.defaultUserAgentProvider ?? + defaultUserAgent({ serviceId: clientSharedValues.serviceId, clientVersion: packageInfo.version }), + maxAttempts: config?.maxAttempts ?? loadNodeConfig(NODE_MAX_ATTEMPT_CONFIG_OPTIONS), + region: config?.region ?? loadNodeConfig(NODE_REGION_CONFIG_OPTIONS, NODE_REGION_CONFIG_FILE_OPTIONS), + requestHandler: config?.requestHandler ?? new NodeHttpHandler(), + retryMode: config?.retryMode ?? loadNodeConfig(NODE_RETRY_MODE_CONFIG_OPTIONS), + sha256: config?.sha256 ?? Hash.bind(null, "sha256"), + streamCollector: config?.streamCollector ?? streamCollector, + useDualstackEndpoint: config?.useDualstackEndpoint ?? loadNodeConfig(NODE_USE_DUALSTACK_ENDPOINT_CONFIG_OPTIONS), + useFipsEndpoint: config?.useFipsEndpoint ?? loadNodeConfig(NODE_USE_FIPS_ENDPOINT_CONFIG_OPTIONS), + utf8Decoder: config?.utf8Decoder ?? fromUtf8, + utf8Encoder: config?.utf8Encoder ?? 
toUtf8, + }; +}; diff --git a/clients/client-workspaces-web/tsconfig.es.json b/clients/client-workspaces-web/tsconfig.es.json new file mode 100644 index 000000000000..4c72364cd1a0 --- /dev/null +++ b/clients/client-workspaces-web/tsconfig.es.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "target": "es5", + "module": "esnext", + "moduleResolution": "node", + "lib": ["dom", "es5", "es2015.promise", "es2015.collection", "es2015.iterable", "es2015.symbol.wellknown"], + "outDir": "dist-es" + } +} diff --git a/clients/client-workspaces-web/tsconfig.json b/clients/client-workspaces-web/tsconfig.json new file mode 100644 index 000000000000..093039289c53 --- /dev/null +++ b/clients/client-workspaces-web/tsconfig.json @@ -0,0 +1,32 @@ +{ + "compilerOptions": { + "rootDir": "./src", + "alwaysStrict": true, + "target": "ES2018", + "module": "commonjs", + "strict": true, + "downlevelIteration": true, + "importHelpers": true, + "noEmitHelpers": true, + "incremental": true, + "resolveJsonModule": true, + "esModuleInterop": true, + "outDir": "dist-cjs", + "removeComments": true + }, + "typedocOptions": { + "exclude": ["**/node_modules/**", "**/*.spec.ts", "**/protocols/*.ts", "**/e2e/*.ts", "**/endpoints.ts"], + "excludeNotExported": true, + "excludePrivate": true, + "hideGenerator": true, + "ignoreCompilerErrors": true, + "includeDeclarations": true, + "stripInternal": true, + "readme": "README.md", + "mode": "file", + "out": "docs", + "theme": "minimal", + "plugin": ["@aws-sdk/service-client-documentation-generator"] + }, + "exclude": ["test/**/*"] +} diff --git a/clients/client-workspaces-web/tsconfig.types.json b/clients/client-workspaces-web/tsconfig.types.json new file mode 100644 index 000000000000..4c3dfa7b3d25 --- /dev/null +++ b/clients/client-workspaces-web/tsconfig.types.json @@ -0,0 +1,10 @@ +{ + "extends": "./tsconfig", + "compilerOptions": { + "removeComments": false, + "declaration": true, + "declarationDir": "dist-types", + "emitDeclarationOnly": true + }, + "exclude": ["test/**/*", "dist-types/**/*"] +} diff --git a/codegen/sdk-codegen/aws-models/accessanalyzer.json b/codegen/sdk-codegen/aws-models/accessanalyzer.json index 384ae927405b..878e99343b1e 100644 --- a/codegen/sdk-codegen/aws-models/accessanalyzer.json +++ b/codegen/sdk-codegen/aws-models/accessanalyzer.json @@ -3,6 +3,27 @@ "shapes": { "com.amazonaws.accessanalyzer#AccessAnalyzer": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "AccessAnalyzer", + "arnNamespace": "access-analyzer", + "cloudFormationName": "AccessAnalyzer", + "cloudTrailEventSource": "access-analyzer.amazonaws.com", + "endpointPrefix": "access-analyzer" + }, + "aws.auth#sigv4": { + "name": "access-analyzer" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": {}, + "smithy.api#documentation": "

Identity and Access Management Access Analyzer helps identify potential resource-access risks by enabling you to\n identify any policies that grant access to an external principal. It does this by using\n logic-based reasoning to analyze resource-based policies in your Amazon Web Services environment. An\n external principal can be another Amazon Web Services account, a root user, an IAM user or role, a\n federated user, an Amazon Web Services service, or an anonymous user. You can also use IAM Access Analyzer to\n preview and validate public and cross-account access to your resources before deploying\n permissions changes. This guide describes the Identity and Access Management Access Analyzer operations that you can\n call programmatically. For general information about IAM Access Analyzer, see Identity and Access Management Access Analyzer in the IAM User Guide.\n To start using IAM Access Analyzer, you first need to create an analyzer.
                                                                      ", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#title": "Access Analyzer" + }, "version": "2019-11-01", "operations": [ { @@ -67,28 +88,7 @@ { "target": "com.amazonaws.accessanalyzer#Analyzer" } - ], - "traits": { - "aws.api#service": { - "sdkId": "AccessAnalyzer", - "arnNamespace": "access-analyzer", - "cloudFormationName": "AccessAnalyzer", - "cloudTrailEventSource": "access-analyzer.amazonaws.com", - "endpointPrefix": "access-analyzer" - }, - "aws.auth#sigv4": { - "name": "access-analyzer" - }, - "aws.protocols#restJson1": {}, - "smithy.api#cors": {}, - "smithy.api#documentation": "

Identity and Access Management Access Analyzer helps identify potential resource-access risks by enabling you to\n identify any policies that grant access to an external principal. It does this by using\n logic-based reasoning to analyze resource-based policies in your Amazon Web Services environment. An\n external principal can be another Amazon Web Services account, a root user, an IAM user or role, a\n federated user, an Amazon Web Services service, or an anonymous user. You can also use IAM Access Analyzer to\n preview and validate public and cross-account access to your resources before deploying\n permissions changes. This guide describes the Identity and Access Management Access Analyzer operations that you can\n call programmatically. For general information about IAM Access Analyzer, see Identity and Access Management Access Analyzer in the IAM User Guide.\n To start using IAM Access Analyzer, you first need to create an analyzer.
                                                                      ", - "smithy.api#paginated": { - "inputToken": "nextToken", - "outputToken": "nextToken", - "pageSize": "maxResults" - }, - "smithy.api#title": "Access Analyzer" - } + ] }, "com.amazonaws.accessanalyzer#AccessDeniedException": { "type": "structure", @@ -3863,7 +3863,7 @@ "kmsKeyId": { "target": "com.amazonaws.accessanalyzer#SecretsManagerSecretKmsId", "traits": { - "smithy.api#documentation": "

The proposed ARN, key ID, or alias of the KMS customer master key (CMK).
" + "smithy.api#documentation": "
The proposed ARN, key ID, or alias of the KMS key.
                                                                      " } }, "secretPolicy": { @@ -3874,7 +3874,7 @@ } }, "traits": { - "smithy.api#documentation": "

The configuration for a Secrets Manager secret. For more information, see CreateSecret.\n You can propose a configuration for a new secret or an existing secret that you own by\n specifying the secret policy and optional KMS encryption key. If the configuration is for\n an existing secret and you do not specify the secret policy, the access preview uses the\n existing policy for the secret. If the access preview is for a new resource and you do not\n specify the policy, the access preview assumes a secret without a policy. To propose\n deletion of an existing policy, you can specify an empty string. If the proposed\n configuration is for a new secret and you do not specify the KMS key ID, the access\n preview uses the default CMK of the Amazon Web Services account. If you specify an empty string for the\n KMS key ID, the access preview uses the default CMK of the Amazon Web Services account. For more\n information about secret policy limits, see Quotas for\n Secrets Manager..
" + "smithy.api#documentation": "
The configuration for a Secrets Manager secret. For more information, see CreateSecret.\n You can propose a configuration for a new secret or an existing secret that you own by\n specifying the secret policy and optional KMS encryption key. If the configuration is for\n an existing secret and you do not specify the secret policy, the access preview uses the\n existing policy for the secret. If the access preview is for a new resource and you do not\n specify the policy, the access preview assumes a secret without a policy. To propose\n deletion of an existing policy, you can specify an empty string. If the proposed\n configuration is for a new secret and you do not specify the KMS key ID, the access\n preview uses the Amazon Web Services managed key aws/secretsmanager. If you specify an empty\n string for the KMS key ID, the access preview uses the Amazon Web Services managed key of the Amazon Web Services\n account. For more information about secret policy limits, see Quotas for\n Secrets Manager..
                                                                      " } }, "com.amazonaws.accessanalyzer#SecretsManagerSecretKmsId": { @@ -4691,9 +4691,38 @@ "smithy.api#documentation": "

The type of policy to validate. Identity policies grant permissions to IAM principals.\n Identity policies include managed and inline policies for IAM roles, users, and groups.\n They also include service-control policies (SCPs) that are attached to an Amazon Web Services\n organization, organizational unit (OU), or an account.\n Resource policies grant permissions on Amazon Web Services resources. Resource policies include trust\n policies for IAM roles and bucket policies for Amazon S3 buckets. You can provide a generic\n input such as identity policy or resource policy or a specific input such as managed policy\n or Amazon S3 bucket policy.
                                                                      ", "smithy.api#required": {} } + }, + "validatePolicyResourceType": { + "target": "com.amazonaws.accessanalyzer#ValidatePolicyResourceType", + "traits": { + "smithy.api#documentation": "

The type of resource to attach to your resource policy. Specify a value for the policy\n validation resource type only if the policy type is RESOURCE_POLICY. For\n example, to validate a resource policy to attach to an Amazon S3 bucket, you can choose\n AWS::S3::Bucket for the policy validation resource type.\n For resource types not supported as valid values, IAM Access Analyzer runs policy checks that\n apply to all resource policies. For example, to validate a resource policy to attach to a\n KMS key, do not specify a value for the policy validation resource type and IAM Access Analyzer\n will run policy checks that apply to all resource policies.
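As a usage sketch for the new validatePolicyResourceType member above: the TypeScript below shows how a caller might pass it through the AccessAnalyzer client. The AccessAnalyzerClient and ValidatePolicyCommand names come from the existing @aws-sdk/client-accessanalyzer package; the policyDocument/policyType request members and the findings output member are assumed from the existing request/response shapes, and the bucket policy is a placeholder.

import { AccessAnalyzerClient, ValidatePolicyCommand } from "@aws-sdk/client-accessanalyzer";

const client = new AccessAnalyzerClient({ region: "us-east-1" });

// Validate a resource policy that will be attached to an S3 bucket. The
// validatePolicyResourceType member is only meaningful for RESOURCE_POLICY;
// omitting it runs the checks that apply to all resource policies (e.g. a KMS key policy).
const { findings } = await client.send(
  new ValidatePolicyCommand({
    policyType: "RESOURCE_POLICY",
    validatePolicyResourceType: "AWS::S3::Bucket",
    policyDocument: JSON.stringify({
      Version: "2012-10-17",
      Statement: [
        { Effect: "Allow", Principal: "*", Action: "s3:GetObject", Resource: "arn:aws:s3:::example-bucket/*" },
      ],
    }),
  })
);
console.log(findings);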
                                                                      " + } } } }, + "com.amazonaws.accessanalyzer#ValidatePolicyResourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS::S3::Bucket", + "name": "S3_BUCKET" + }, + { + "value": "AWS::S3::AccessPoint", + "name": "S3_ACCESS_POINT" + }, + { + "value": "AWS::S3::MultiRegionAccessPoint", + "name": "S3_MULTI_REGION_ACCESS_POINT" + }, + { + "value": "AWS::S3ObjectLambda::AccessPoint", + "name": "S3_OBJECT_LAMBDA_ACCESS_POINT" + } + ] + } + }, "com.amazonaws.accessanalyzer#ValidatePolicyResponse": { "type": "structure", "members": { diff --git a/codegen/sdk-codegen/aws-models/backup-gateway.json b/codegen/sdk-codegen/aws-models/backup-gateway.json new file mode 100644 index 000000000000..03f8a2775162 --- /dev/null +++ b/codegen/sdk-codegen/aws-models/backup-gateway.json @@ -0,0 +1,1543 @@ +{ + "smithy": "1.0", + "shapes": { + "com.amazonaws.backupgateway#AccessDeniedException": { + "type": "structure", + "members": { + "ErrorCode": { + "target": "com.amazonaws.backupgateway#string", + "traits": { + "smithy.api#documentation": "

A description of why you have insufficient permissions.
                                                                      ", + "smithy.api#required": {} + } + }, + "Message": { + "target": "com.amazonaws.backupgateway#string" + } + }, + "traits": { + "smithy.api#documentation": "

The operation cannot proceed because you have insufficient permissions.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.backupgateway#ActivationKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 50 + }, + "smithy.api#pattern": "^[0-9a-zA-Z\\-]+$" + } + }, + "com.amazonaws.backupgateway#AssociateGatewayToServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#AssociateGatewayToServerInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#AssociateGatewayToServerOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#ConflictException" + }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates a backup gateway with your server. After you complete the association process,\n you can back up and restore your VMs through the gateway.
                                                                      " + } + }, + "com.amazonaws.backupgateway#AssociateGatewayToServerInput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation\n to return a list of gateways for your account and Amazon Web Services Region.
                                                                      ", + "smithy.api#required": {} + } + }, + "ServerArn": { + "target": "com.amazonaws.backupgateway#ServerArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the server that hosts your virtual machines.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.backupgateway#AssociateGatewayToServerOutput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of a gateway.
                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#BackupOnPremises_v20210101": { + "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Backup Gateway", + "arnNamespace": "backup-gateway", + "cloudFormationName": "BackupGateway", + "cloudTrailEventSource": "backup-gateway.amazonaws.com", + "endpointPrefix": "backup-gateway" + }, + "aws.auth#sigv4": { + "name": "backup-gateway" + }, + "aws.protocols#awsJson1_0": {}, + "smithy.api#documentation": "Backup gateway\n

Backup gateway connects Backup to your hypervisor, so you can\n create, store, and restore backups of your virtual machines (VMs) anywhere, whether\n on-premises or in the VMware Cloud (VMC) on Amazon Web Services.\n Add on-premises resources by connecting to a hypervisor through a gateway. Backup will automatically discover the resources in your hypervisor.\n Use Backup to assign virtual or on-premises resources to a backup plan, or run\n on-demand backups. Once you have backed up your resources, you can view them and restore them\n like any resource supported by Backup.\n To download the Amazon Web Services software to get started, navigate to the Backup console, choose Gateways, then choose Create gateway.
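A minimal sketch of the gateway workflow described above, assuming the generated @aws-sdk/client-backup-gateway package and its CreateGatewayCommand / AssociateGatewayToServerCommand (named after the CreateGateway and AssociateGatewayToServer operations in this model). The activation key and server ARN are placeholders.

import {
  BackupGatewayClient,
  CreateGatewayCommand,
  AssociateGatewayToServerCommand,
} from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

async function createAndAssociateGateway(serverArn: string): Promise<string | undefined> {
  // Create the gateway from the activation key reported by the deployed appliance.
  const { GatewayArn } = await client.send(
    new CreateGatewayCommand({
      ActivationKey: "0000-0000-0000-0000-0000", // placeholder
      GatewayDisplayName: "my-backup-gateway",
      GatewayType: "BACKUP_VM",
    })
  );

  // Associate the new gateway with the server that hosts the virtual machines.
  await client.send(new AssociateGatewayToServerCommand({ GatewayArn: GatewayArn!, ServerArn: serverArn }));
  return GatewayArn;
}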
                                                                      ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + }, + "smithy.api#title": "AWS Backup Gateway" + }, + "version": "2021-01-01", + "operations": [ + { + "target": "com.amazonaws.backupgateway#ListTagsForResource" + }, + { + "target": "com.amazonaws.backupgateway#ListVirtualMachines" + }, + { + "target": "com.amazonaws.backupgateway#TagResource" + }, + { + "target": "com.amazonaws.backupgateway#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.backupgateway#GatewayResource" + }, + { + "target": "com.amazonaws.backupgateway#HypervisorResource" + } + ] + }, + "com.amazonaws.backupgateway#ConflictException": { + "type": "structure", + "members": { + "ErrorCode": { + "target": "com.amazonaws.backupgateway#string", + "traits": { + "smithy.api#documentation": "

A description of why the operation is not supported.
                                                                      ", + "smithy.api#required": {} + } + }, + "Message": { + "target": "com.amazonaws.backupgateway#string" + } + }, + "traits": { + "smithy.api#documentation": "

The operation cannot proceed because it is not supported.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.backupgateway#CreateGateway": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#CreateGatewayInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#CreateGatewayOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a backup gateway. After you create a gateway, you can associate it with a server\n using the AssociateGatewayToServer operation.
                                                                      " + } + }, + "com.amazonaws.backupgateway#CreateGatewayInput": { + "type": "structure", + "members": { + "ActivationKey": { + "target": "com.amazonaws.backupgateway#ActivationKey", + "traits": { + "smithy.api#documentation": "

The activation key of the created gateway.
                                                                      ", + "smithy.api#required": {} + } + }, + "GatewayDisplayName": { + "target": "com.amazonaws.backupgateway#Name", + "traits": { + "smithy.api#documentation": "

The display name of the created gateway.
                                                                      ", + "smithy.api#required": {} + } + }, + "GatewayType": { + "target": "com.amazonaws.backupgateway#GatewayType", + "traits": { + "smithy.api#documentation": "

The type of created gateway.
                                                                      ", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.backupgateway#Tags", + "traits": { + "smithy.api#documentation": "

A list of up to 50 tags to assign to the gateway. Each tag is a key-value pair.
                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#CreateGatewayOutput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the gateway you create.
                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#DayOfMonth": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 31 + } + } + }, + "com.amazonaws.backupgateway#DayOfWeek": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 6 + } + } + }, + "com.amazonaws.backupgateway#DeleteGateway": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#DeleteGatewayInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#DeleteGatewayOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a backup gateway.
                                                                      ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backupgateway#DeleteGatewayInput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the gateway to delete.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.backupgateway#DeleteGatewayOutput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the gateway you deleted.
                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#DeleteHypervisor": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#DeleteHypervisorInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#DeleteHypervisorOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#AccessDeniedException" + }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes a hypervisor.
                                                                      ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.backupgateway#DeleteHypervisorInput": { + "type": "structure", + "members": { + "HypervisorArn": { + "target": "com.amazonaws.backupgateway#ServerArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the hypervisor to delete.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.backupgateway#DeleteHypervisorOutput": { + "type": "structure", + "members": { + "HypervisorArn": { + "target": "com.amazonaws.backupgateway#ServerArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the hypervisor you deleted.
                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#DisassociateGatewayFromServer": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#DisassociateGatewayFromServerInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#DisassociateGatewayFromServerOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#ConflictException" + }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Disassociates a backup gateway from the specified server. After the disassociation process\n finishes, the gateway can no longer access the virtual machines on the server.
                                                                      " + } + }, + "com.amazonaws.backupgateway#DisassociateGatewayFromServerInput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the gateway to disassociate.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.backupgateway#DisassociateGatewayFromServerOutput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the gateway you disassociated.
                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#Gateway": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the gateway. Use the ListGateways operation\n to return a list of gateways for your account and Amazon Web Services Region.
                                                                      " + } + }, + "GatewayDisplayName": { + "target": "com.amazonaws.backupgateway#Name", + "traits": { + "smithy.api#documentation": "

The display name of the gateway.
                                                                      " + } + }, + "GatewayType": { + "target": "com.amazonaws.backupgateway#GatewayType", + "traits": { + "smithy.api#documentation": "

The type of the gateway.
                                                                      " + } + }, + "HypervisorId": { + "target": "com.amazonaws.backupgateway#HypervisorId", + "traits": { + "smithy.api#documentation": "

The hypervisor ID of the gateway.
                                                                      " + } + }, + "LastSeenTime": { + "target": "com.amazonaws.backupgateway#Time", + "traits": { + "smithy.api#documentation": "

The last time Backup gateway communicated with the gateway, in Unix format and\n UTC time.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

A gateway is a Backup Gateway appliance that runs on the customer's network\n to provide seamless connectivity to backup storage in the Amazon Web Services Cloud.
                                                                      " + } + }, + "com.amazonaws.backupgateway#GatewayArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 50, + "max": 500 + }, + "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov):backup-gateway(:[a-zA-Z-0-9]+){3}\\/[a-zA-Z-0-9]+$" + } + }, + "com.amazonaws.backupgateway#GatewayResource": { + "type": "resource", + "identifiers": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn" + } + }, + "create": { + "target": "com.amazonaws.backupgateway#CreateGateway" + }, + "update": { + "target": "com.amazonaws.backupgateway#UpdateGatewayInformation" + }, + "delete": { + "target": "com.amazonaws.backupgateway#DeleteGateway" + }, + "list": { + "target": "com.amazonaws.backupgateway#ListGateways" + }, + "operations": [ + { + "target": "com.amazonaws.backupgateway#AssociateGatewayToServer" + }, + { + "target": "com.amazonaws.backupgateway#DisassociateGatewayFromServer" + }, + { + "target": "com.amazonaws.backupgateway#PutMaintenanceStartTime" + }, + { + "target": "com.amazonaws.backupgateway#TestHypervisorConfiguration" + } + ], + "traits": { + "aws.cloudformation#cfnResource": { + "name": "Gateway" + } + } + }, + "com.amazonaws.backupgateway#GatewayType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BACKUP_VM", + "name": "BACKUP_VM" + } + ] + } + }, + "com.amazonaws.backupgateway#Gateways": { + "type": "list", + "member": { + "target": "com.amazonaws.backupgateway#Gateway" + } + }, + "com.amazonaws.backupgateway#Host": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 128 + }, + "smithy.api#pattern": "^.+$" + } + }, + "com.amazonaws.backupgateway#HourOfDay": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 23 + } + } + }, + "com.amazonaws.backupgateway#Hypervisor": { + "type": "structure", + "members": { + "Host": { + "target": "com.amazonaws.backupgateway#Host", + "traits": { + "smithy.api#documentation": "

The server host of the hypervisor. This can be either an IP address or a fully-qualified\n domain name (FQDN).
                                                                      " + } + }, + "HypervisorArn": { + "target": "com.amazonaws.backupgateway#ServerArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the hypervisor.
                                                                      " + } + }, + "KmsKeyArn": { + "target": "com.amazonaws.backupgateway#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the Key Management Service used to encrypt the\n hypervisor.
                                                                      " + } + }, + "Name": { + "target": "com.amazonaws.backupgateway#Name", + "traits": { + "smithy.api#documentation": "

The name of the hypervisor.
                                                                      " + } + }, + "State": { + "target": "com.amazonaws.backupgateway#HypervisorState", + "traits": { + "smithy.api#documentation": "

The state of the hypervisor.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Represents the hypervisor's permissions to which the gateway will connect.\n A hypervisor is hardware, software, or firmware that creates and manages virtual machines,\n and allocates resources to them.
                                                                      " + } + }, + "com.amazonaws.backupgateway#HypervisorId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.backupgateway#HypervisorResource": { + "type": "resource", + "identifiers": { + "HypervisorArn": { + "target": "com.amazonaws.backupgateway#ServerArn" + } + }, + "create": { + "target": "com.amazonaws.backupgateway#ImportHypervisorConfiguration" + }, + "update": { + "target": "com.amazonaws.backupgateway#UpdateHypervisor" + }, + "delete": { + "target": "com.amazonaws.backupgateway#DeleteHypervisor" + }, + "list": { + "target": "com.amazonaws.backupgateway#ListHypervisors" + }, + "traits": { + "aws.cloudformation#cfnResource": { + "name": "Hypervisor" + } + } + }, + "com.amazonaws.backupgateway#HypervisorState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "ONLINE", + "name": "ONLINE" + }, + { + "value": "OFFLINE", + "name": "OFFLINE" + }, + { + "value": "ERROR", + "name": "ERROR" + } + ] + } + }, + "com.amazonaws.backupgateway#Hypervisors": { + "type": "list", + "member": { + "target": "com.amazonaws.backupgateway#Hypervisor" + } + }, + "com.amazonaws.backupgateway#ImportHypervisorConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#ImportHypervisorConfigurationInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#ImportHypervisorConfigurationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#AccessDeniedException" + }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Connect to a hypervisor by importing its configuration.
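A sketch of the ImportHypervisorConfiguration call based on the input members defined below (Name and Host are required; Username, Password, and KmsKeyArn are optional). The @aws-sdk/client-backup-gateway package and command name follow the model's operation name; all values are placeholders.

import { BackupGatewayClient, ImportHypervisorConfigurationCommand } from "@aws-sdk/client-backup-gateway";

const client = new BackupGatewayClient({ region: "us-east-1" });

// Register the hypervisor so that associated gateways can discover its VMs.
const { HypervisorArn } = await client.send(
  new ImportHypervisorConfigurationCommand({
    Name: "vcenter-primary",
    Host: "vcenter.example.com", // IP address or fully-qualified domain name
    Username: "backup-admin",
    Password: "example-password",
  })
);
console.log(HypervisorArn);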
                                                                      " + } + }, + "com.amazonaws.backupgateway#ImportHypervisorConfigurationInput": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.backupgateway#Name", + "traits": { + "smithy.api#documentation": "

The name of the hypervisor.
                                                                      ", + "smithy.api#required": {} + } + }, + "Host": { + "target": "com.amazonaws.backupgateway#Host", + "traits": { + "smithy.api#documentation": "

The server host of the hypervisor. This can be either an IP address or a fully-qualified\n domain name (FQDN).
                                                                      ", + "smithy.api#required": {} + } + }, + "Username": { + "target": "com.amazonaws.backupgateway#Username", + "traits": { + "smithy.api#documentation": "

The username for the hypervisor.
                                                                      " + } + }, + "Password": { + "target": "com.amazonaws.backupgateway#Password", + "traits": { + "smithy.api#documentation": "

The password for the hypervisor.
                                                                      " + } + }, + "KmsKeyArn": { + "target": "com.amazonaws.backupgateway#KmsKeyArn", + "traits": { + "smithy.api#documentation": "

The Key Management Service for the hypervisor.
                                                                      " + } + }, + "Tags": { + "target": "com.amazonaws.backupgateway#Tags", + "traits": { + "smithy.api#documentation": "

The tags of the hypervisor configuration to import.
                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#ImportHypervisorConfigurationOutput": { + "type": "structure", + "members": { + "HypervisorArn": { + "target": "com.amazonaws.backupgateway#ServerArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the hypervisor you imported.
                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#InternalServerException": { + "type": "structure", + "members": { + "ErrorCode": { + "target": "com.amazonaws.backupgateway#string", + "traits": { + "smithy.api#documentation": "

A description of which internal error occurred.
                                                                      " + } + }, + "Message": { + "target": "com.amazonaws.backupgateway#string" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The operation did not succeed because an internal error occurred. Try again later.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.backupgateway#KmsKeyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 50, + "max": 500 + }, + "smithy.api#pattern": "^(^arn:(aws|aws-cn|aws-us-gov):kms:([a-zA-Z0-9-]+):([0-9]+):(key|alias)/(\\S+)$)|(^alias/(\\S+)$)$" + } + }, + "com.amazonaws.backupgateway#ListGateways": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#ListGatewaysInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#ListGatewaysOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Lists backup gateways owned by an Amazon Web Services account in an Amazon Web Services Region. The returned list is ordered by gateway Amazon Resource Name (ARN).

                                                                      ", + "smithy.api#paginated": { + "items": "Gateways" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupgateway#ListGatewaysInput": { + "type": "structure", + "members": { + "MaxResults": { + "target": "com.amazonaws.backupgateway#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of gateways to list.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.backupgateway#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The next item following a partial list of returned resources. For example, if a request is\n made to return MaxResults number of resources, NextToken allows you\n to return more items in your list starting at the location pointed to by the next\n token.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#ListGatewaysOutput": { + "type": "structure", + "members": { + "Gateways": { + "target": "com.amazonaws.backupgateway#Gateways", + "traits": { + "smithy.api#documentation": "

                                                                      A list of your gateways.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.backupgateway#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The next item following a partial list of returned resources. For example, if a request is\n made to return maxResults number of resources, NextToken allows you\n to return more items in your list starting at the location pointed to by the next\n token.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#ListHypervisors": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#ListHypervisorsInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#ListHypervisorsOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Lists your hypervisors.

                                                                      ", + "smithy.api#paginated": { + "items": "Hypervisors" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupgateway#ListHypervisorsInput": { + "type": "structure", + "members": { + "MaxResults": { + "target": "com.amazonaws.backupgateway#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of hypervisors to list.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.backupgateway#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The next item following a partial list of returned resources. For example, if a request is\n made to return maxResults number of resources, NextToken allows you\n to return more items in your list starting at the location pointed to by the next\n token.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#ListHypervisorsOutput": { + "type": "structure", + "members": { + "Hypervisors": { + "target": "com.amazonaws.backupgateway#Hypervisors", + "traits": { + "smithy.api#documentation": "

                                                                      A list of your Hypervisor objects, ordered by their Amazon Resource Names\n (ARNs).

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.backupgateway#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The next item following a partial list of returned resources. For example, if a request is\n made to return maxResults number of resources, NextToken allows you\n to return more items in your list starting at the location pointed to by the next\n token.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#ListTagsForResourceInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#ListTagsForResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Lists the tags applied to the resource identified by its Amazon Resource Name\n (ARN).

                                                                      " + } + }, + "com.amazonaws.backupgateway#ListTagsForResourceInput": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.backupgateway#ResourceArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource whose tags you want to list.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.backupgateway#ListTagsForResourceOutput": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.backupgateway#ResourceArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource whose tags you listed.

                                                                      " + } + }, + "Tags": { + "target": "com.amazonaws.backupgateway#Tags", + "traits": { + "smithy.api#documentation": "

                                                                      A list of the resource's tags.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#ListVirtualMachines": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#ListVirtualMachinesInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#ListVirtualMachinesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Lists your virtual machines.

                                                                      ", + "smithy.api#paginated": { + "items": "VirtualMachines" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.backupgateway#ListVirtualMachinesInput": { + "type": "structure", + "members": { + "MaxResults": { + "target": "com.amazonaws.backupgateway#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of virtual machines to list.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.backupgateway#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The next item following a partial list of returned resources. For example, if a request is\n made to return maxResults number of resources, NextToken allows you\n to return more items in your list starting at the location pointed to by the next\n token.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#ListVirtualMachinesOutput": { + "type": "structure", + "members": { + "VirtualMachines": { + "target": "com.amazonaws.backupgateway#VirtualMachines", + "traits": { + "smithy.api#documentation": "

                                                                      A list of your VirtualMachine objects, ordered by their Amazon Resource Names\n (ARNs).

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.backupgateway#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The next item following a partial list of returned resources. For example, if a request is\n made to return maxResults number of resources, NextToken allows you\n to return more items in your list starting at the location pointed to by the next\n token.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.backupgateway#MinuteOfHour": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 59 + } + } + }, + "com.amazonaws.backupgateway#Name": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[a-zA-Z0-9-]*$" + } + }, + "com.amazonaws.backupgateway#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1000 + }, + "smithy.api#pattern": "^.+$" + } + }, + "com.amazonaws.backupgateway#Password": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[ -~]+$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.backupgateway#Path": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#pattern": "^[^\\x00]+$" + } + }, + "com.amazonaws.backupgateway#PutMaintenanceStartTime": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#PutMaintenanceStartTimeInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#PutMaintenanceStartTimeOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#ConflictException" + }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Sets the maintenance start time for a gateway.

                                                                      " + } + }, + "com.amazonaws.backupgateway#PutMaintenanceStartTimeInput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) for the gateway, used to specify its maintenance start\n time.

                                                                      ", + "smithy.api#required": {} + } + }, + "HourOfDay": { + "target": "com.amazonaws.backupgateway#HourOfDay", + "traits": { + "smithy.api#documentation": "

                                                                      The hour of the day to start maintenance on a gateway.

                                                                      ", + "smithy.api#required": {} + } + }, + "MinuteOfHour": { + "target": "com.amazonaws.backupgateway#MinuteOfHour", + "traits": { + "smithy.api#documentation": "

                                                                      The minute of the hour to start maintenance on a gateway.

                                                                      ", + "smithy.api#required": {} + } + }, + "DayOfWeek": { + "target": "com.amazonaws.backupgateway#DayOfWeek", + "traits": { + "smithy.api#documentation": "

                                                                      The day of the week to start maintenance on a gateway.

                                                                      " + } + }, + "DayOfMonth": { + "target": "com.amazonaws.backupgateway#DayOfMonth", + "traits": { + "smithy.api#documentation": "

                                                                      The day of the month to start maintenance on a gateway.

                                                                      \n

                                                                      Valid values range from 1 to 31.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#PutMaintenanceStartTimeOutput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of a gateway for which you set the maintenance start\n time.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#ResourceArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 50, + "max": 500 + }, + "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov):backup-gateway(:[a-zA-Z-0-9]+){3}\\/[a-zA-Z-0-9]+$" + } + }, + "com.amazonaws.backupgateway#ResourceNotFoundException": { + "type": "structure", + "members": { + "ErrorCode": { + "target": "com.amazonaws.backupgateway#string", + "traits": { + "smithy.api#documentation": "

                                                                      A description of which resource wasn't found.

                                                                      " + } + }, + "Message": { + "target": "com.amazonaws.backupgateway#string" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A resource that is required for the action wasn't found.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.backupgateway#ServerArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 50, + "max": 500 + }, + "smithy.api#pattern": "^arn:(aws|aws-cn|aws-us-gov):backup-gateway(:[a-zA-Z-0-9]+){3}\\/[a-zA-Z-0-9]+$" + } + }, + "com.amazonaws.backupgateway#Tag": { + "type": "structure", + "members": { + "Key": { + "target": "com.amazonaws.backupgateway#TagKey", + "traits": { + "smithy.api#documentation": "

                                                                      The key part of a tag's key-value pair. The key can't start with aws:.

                                                                      ", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.backupgateway#TagValue", + "traits": { + "smithy.api#documentation": "

                                                                      The value part of a tag's key-value pair.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A key-value pair you can use to manage, filter, and search for your resources. Allowed\n characters include UTF-8 letters, numbers, spaces, and the following characters: + - = . _ :\n /.

                                                                      " + } + }, + "com.amazonaws.backupgateway#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, + "com.amazonaws.backupgateway#TagKeys": { + "type": "list", + "member": { + "target": "com.amazonaws.backupgateway#TagKey" + } + }, + "com.amazonaws.backupgateway#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#TagResourceInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#TagResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Tags the resource.

                                                                      " + } + }, + "com.amazonaws.backupgateway#TagResourceInput": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.backupgateway#ResourceArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource to tag.

                                                                      ", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.backupgateway#Tags", + "traits": { + "smithy.api#documentation": "

                                                                      A list of tags to assign to the resource.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.backupgateway#TagResourceOutput": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.backupgateway#ResourceArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource you tagged.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[^\\x00]*$" + } + }, + "com.amazonaws.backupgateway#Tags": { + "type": "list", + "member": { + "target": "com.amazonaws.backupgateway#Tag" + } + }, + "com.amazonaws.backupgateway#TestHypervisorConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#TestHypervisorConfigurationInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#TestHypervisorConfigurationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#ConflictException" + }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Tests your hypervisor configuration to validate that backup gateway can connect with the\n hypervisor and its resources.

                                                                      " + } + }, + "com.amazonaws.backupgateway#TestHypervisorConfigurationInput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the gateway that connects to the hypervisor you want to test.

                                                                      ", + "smithy.api#required": {} + } + }, + "Host": { + "target": "com.amazonaws.backupgateway#Host", + "traits": { + "smithy.api#documentation": "

                                                                      The server host of the hypervisor. This can be either an IP address or a fully-qualified\n domain name (FQDN).

                                                                      ", + "smithy.api#required": {} + } + }, + "Username": { + "target": "com.amazonaws.backupgateway#Username", + "traits": { + "smithy.api#documentation": "

                                                                      The username for the hypervisor.

                                                                      " + } + }, + "Password": { + "target": "com.amazonaws.backupgateway#Password", + "traits": { + "smithy.api#documentation": "

                                                                      The password for the hypervisor.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#TestHypervisorConfigurationOutput": { + "type": "structure", + "members": {} + }, + "com.amazonaws.backupgateway#Time": { + "type": "timestamp" + }, + "com.amazonaws.backupgateway#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#UntagResourceInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#UntagResourceOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Removes tags from the resource.

                                                                      " + } + }, + "com.amazonaws.backupgateway#UntagResourceInput": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.backupgateway#ResourceArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource from which to remove tags.

                                                                      ", + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.backupgateway#TagKeys", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys specifying which tags to remove.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.backupgateway#UntagResourceOutput": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.backupgateway#ResourceArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource from which you removed tags.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#UpdateGatewayInformation": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#UpdateGatewayInformationInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#UpdateGatewayInformationOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#ConflictException" + }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates a gateway's name. Specify which gateway to update using the Amazon Resource Name\n (ARN) of the gateway in your request.

                                                                      " + } + }, + "com.amazonaws.backupgateway#UpdateGatewayInformationInput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the gateway to update.

                                                                      ", + "smithy.api#required": {} + } + }, + "GatewayDisplayName": { + "target": "com.amazonaws.backupgateway#Name", + "traits": { + "smithy.api#documentation": "

                                                                      The updated display name of the gateway.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#UpdateGatewayInformationOutput": { + "type": "structure", + "members": { + "GatewayArn": { + "target": "com.amazonaws.backupgateway#GatewayArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the gateway you updated.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#UpdateHypervisor": { + "type": "operation", + "input": { + "target": "com.amazonaws.backupgateway#UpdateHypervisorInput" + }, + "output": { + "target": "com.amazonaws.backupgateway#UpdateHypervisorOutput" + }, + "errors": [ + { + "target": "com.amazonaws.backupgateway#AccessDeniedException" + }, + { + "target": "com.amazonaws.backupgateway#InternalServerException" + }, + { + "target": "com.amazonaws.backupgateway#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.backupgateway#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates a hypervisor's metadata, including its host, username, and password. Specify which\n hypervisor to update using the Amazon Resource Name (ARN) of the hypervisor in your\n request.

                                                                      " + } + }, + "com.amazonaws.backupgateway#UpdateHypervisorInput": { + "type": "structure", + "members": { + "HypervisorArn": { + "target": "com.amazonaws.backupgateway#ServerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the hypervisor to update.

                                                                      ", + "smithy.api#required": {} + } + }, + "Host": { + "target": "com.amazonaws.backupgateway#Host", + "traits": { + "smithy.api#documentation": "

                                                                      The updated host of the hypervisor. This can be either an IP address or a fully-qualified\n domain name (FQDN).

                                                                      " + } + }, + "Username": { + "target": "com.amazonaws.backupgateway#Username", + "traits": { + "smithy.api#documentation": "

                                                                      The updated username for the hypervisor.

                                                                      " + } + }, + "Password": { + "target": "com.amazonaws.backupgateway#Password", + "traits": { + "smithy.api#documentation": "

                                                                      The updated password for the hypervisor.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#UpdateHypervisorOutput": { + "type": "structure", + "members": { + "HypervisorArn": { + "target": "com.amazonaws.backupgateway#ServerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the hypervisor you updated.

                                                                      " + } + } + } + }, + "com.amazonaws.backupgateway#Username": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#pattern": "^[ -\\.0-\\[\\]-~]*[!-\\.0-\\[\\]-~][ -\\.0-\\[\\]-~]*$", + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.backupgateway#ValidationException": { + "type": "structure", + "members": { + "ErrorCode": { + "target": "com.amazonaws.backupgateway#string", + "traits": { + "smithy.api#documentation": "

                                                                      A description of what caused the validation error.

                                                                      " + } + }, + "Message": { + "target": "com.amazonaws.backupgateway#string" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The operation did not succeed because a validation error occurred.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.backupgateway#VirtualMachine": { + "type": "structure", + "members": { + "HostName": { + "target": "com.amazonaws.backupgateway#Name", + "traits": { + "smithy.api#documentation": "

                                                                      The host name of the virtual machine.

                                                                      " + } + }, + "HypervisorId": { + "target": "com.amazonaws.backupgateway#string", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the virtual machine's hypervisor.

                                                                      " + } + }, + "Name": { + "target": "com.amazonaws.backupgateway#Name", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the virtual machine.

                                                                      " + } + }, + "Path": { + "target": "com.amazonaws.backupgateway#Path", + "traits": { + "smithy.api#documentation": "

                                                                      The path of the virtual machine.

                                                                      " + } + }, + "ResourceArn": { + "target": "com.amazonaws.backupgateway#ResourceArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the virtual machine.

                                                                      " + } + }, + "LastBackupDate": { + "target": "com.amazonaws.backupgateway#Time", + "traits": { + "smithy.api#documentation": "

                                                                      The most recent date a virtual machine was backed up, in Unix format and UTC time.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A virtual machine that is on a hypervisor.

                                                                      " + } + }, + "com.amazonaws.backupgateway#VirtualMachines": { + "type": "list", + "member": { + "target": "com.amazonaws.backupgateway#VirtualMachine" + } + }, + "com.amazonaws.backupgateway#string": { + "type": "string" + } + } +} diff --git a/codegen/sdk-codegen/aws-models/compute-optimizer.json b/codegen/sdk-codegen/aws-models/compute-optimizer.json index 9b91a52b335a..ed425c4fba2e 100644 --- a/codegen/sdk-codegen/aws-models/compute-optimizer.json +++ b/codegen/sdk-codegen/aws-models/compute-optimizer.json @@ -60,7 +60,7 @@ "statusReason": { "target": "com.amazonaws.computeoptimizer#StatusReason", "traits": { - "smithy.api#documentation": "

                                                                      The reason for the account enrollment status.

                                                                      \n\n

                                                                      For example, an account might show a status of Pending because member\n accounts of an organization require more time to be enrolled in the service.

                                                                      " + "smithy.api#documentation": "

                                                                      The reason for the account enrollment status.

                                                                      \n

                                                                      For example, an account might show a status of Pending because member\n accounts of an organization require more time to be enrolled in the service.

                                                                      " } }, "lastUpdatedTimestamp": { @@ -157,7 +157,7 @@ "finding": { "target": "com.amazonaws.computeoptimizer#Finding", "traits": { - "smithy.api#documentation": "

                                                                      The finding classification of the Auto Scaling group.

                                                                      \n\n

                                                                      Findings for Auto Scaling groups include:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        \n \n NotOptimized\n —An Auto Scaling group is considered not optimized when Compute Optimizer identifies a\n recommendation that can provide better performance for your workload.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Optimized\n —An Auto Scaling\n group is considered optimized when Compute Optimizer determines that the group\n is correctly provisioned to run your workload based on the chosen instance type.\n For optimized resources, Compute Optimizer might recommend a new generation\n instance type.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The finding classification of the Auto Scaling group.

                                                                      \n

                                                                      Findings for Auto Scaling groups include:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n \n NotOptimized\n —An Auto Scaling group is considered not optimized when Compute Optimizer identifies a\n recommendation that can provide better performance for your workload.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Optimized\n —An Auto Scaling\n group is considered optimized when Compute Optimizer determines that the group\n is correctly provisioned to run your workload based on the chosen instance type.\n For optimized resources, Compute Optimizer might recommend a new generation\n instance type.

                                                                        \n
                                                                      • \n
                                                                      " } }, "utilizationMetrics": { @@ -187,7 +187,19 @@ "lastRefreshTimestamp": { "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", "traits": { - "smithy.api#documentation": "

                                                                      The timestamp of when the Auto Scaling group recommendation was last\n refreshed.

                                                                      " + "smithy.api#documentation": "

                                                                      The timestamp of when the Auto Scaling group recommendation was last\n generated.

                                                                      " + } + }, + "currentPerformanceRisk": { + "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRisk", + "traits": { + "smithy.api#documentation": "

                                                                      The risk of the current Auto Scaling group not meeting the performance needs of\n its workloads. The higher the risk, the more likely the current Auto Scaling group\n configuration has insufficient capacity and cannot meet workload requirements.

                                                                      " + } + }, + "effectiveRecommendationPreferences": { + "target": "com.amazonaws.computeoptimizer#EffectiveRecommendationPreferences", + "traits": { + "smithy.api#documentation": "

                                                                      An object that describes the effective recommendation preferences for the Auto Scaling group.

                                                                      " } } }, @@ -207,19 +219,25 @@ "projectedUtilizationMetrics": { "target": "com.amazonaws.computeoptimizer#ProjectedUtilizationMetrics", "traits": { - "smithy.api#documentation": "

                                                                      An array of objects that describe the projected utilization metrics of the Auto Scaling group recommendation option.

                                                                      \n\n \n

                                                                      The Cpu and Memory metrics are the only projected\n utilization metrics returned. Additionally, the Memory metric is\n returned only for resources that have the unified CloudWatch agent installed\n on them. For more information, see Enabling Memory\n Utilization with the CloudWatch Agent.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      An array of objects that describe the projected utilization metrics of the Auto Scaling group recommendation option.

                                                                      \n \n

                                                                      The Cpu and Memory metrics are the only projected\n utilization metrics returned. Additionally, the Memory metric is\n returned only for resources that have the unified CloudWatch agent installed\n on them. For more information, see Enabling Memory\n Utilization with the CloudWatch Agent.

                                                                      \n
                                                                      " } }, "performanceRisk": { "target": "com.amazonaws.computeoptimizer#PerformanceRisk", "traits": { - "smithy.api#documentation": "

                                                                      The performance risk of the Auto Scaling group configuration\n recommendation.

                                                                      \n\n

                                                                      Performance risk indicates the likelihood of the recommended instance type not meeting\n the resource needs of your workload. Compute Optimizer calculates an individual\n performance risk score for each specification of the recommended instance, including\n CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput,\n and network PPS.\n The performance\n risk of the recommended instance is calculated as the maximum performance risk score\n across the analyzed resource specifications.

                                                                      \n\n

                                                                      The value ranges from 0 - 4, with 0 meaning\n that the recommended resource is predicted to always provide enough hardware capability.\n The higher the performance risk is, the more likely you should validate whether the\n recommendation will meet the performance requirements of your workload before migrating\n your resource.

                                                                      " + "smithy.api#documentation": "

                                                                      The performance risk of the Auto Scaling group configuration\n recommendation.

                                                                      \n

                                                                      Performance risk indicates the likelihood of the recommended instance type not meeting\n the resource needs of your workload. Compute Optimizer calculates an individual\n performance risk score for each specification of the recommended instance, including\n CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput,\n and network PPS.\n The performance\n risk of the recommended instance is calculated as the maximum performance risk score\n across the analyzed resource specifications.

                                                                      \n

                                                                      The value ranges from 0 - 4, with 0 meaning\n that the recommended resource is predicted to always provide enough hardware capability.\n The higher the performance risk is, the more likely you should validate whether the\n recommendation will meet the performance requirements of your workload before migrating\n your resource.

                                                                      " } }, "rank": { "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#documentation": "

                                                                      The rank of the Auto Scaling group recommendation option.

                                                                      \n\n

                                                                      The top recommendation option is ranked as 1.

                                                                      " + "smithy.api#documentation": "

                                                                      The rank of the Auto Scaling group recommendation option.

                                                                      \n

                                                                      The top recommendation option is ranked as 1.

                                                                      " + } + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", + "traits": { + "smithy.api#documentation": "

                                                                      An object that describes the savings opportunity for the Auto Scaling group\n recommendation option. Savings opportunity includes the estimated monthly savings amount\n and percentage.

                                                                      " } } }, @@ -244,8 +262,26 @@ }, "com.amazonaws.computeoptimizer#ComputeOptimizerService": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Compute Optimizer", + "arnNamespace": "compute-optimizer", + "cloudFormationName": "ComputeOptimizer", + "cloudTrailEventSource": "computeoptimizer.amazonaws.com", + "endpointPrefix": "compute-optimizer" + }, + "aws.auth#sigv4": { + "name": "compute-optimizer" + }, + "aws.protocols#awsJson1_0": {}, + "smithy.api#documentation": "

                                                                      Compute Optimizer is a service that analyzes the configuration and utilization\n metrics of your Amazon Web Services compute resources, such as Amazon EC2\n instances, Amazon EC2 Auto Scaling groups, Lambda functions, and Amazon EBS volumes. It reports whether your resources are optimal, and generates\n optimization recommendations to reduce the cost and improve the performance of your\n workloads. Compute Optimizer also provides recent utilization metric data, in addition\n to projected utilization metric data for the recommendations, which you can use to\n evaluate which recommendation provides the best price-performance trade-off. The\n analysis of your usage patterns can help you decide when to move or resize your running\n resources, and still meet your performance and capacity requirements. For more\n information about Compute Optimizer, including the required permissions to use the\n service, see the Compute Optimizer User Guide.

                                                                      ", + "smithy.api#title": "AWS Compute Optimizer" + }, "version": "2019-11-01", "operations": [ + { + "target": "com.amazonaws.computeoptimizer#DeleteRecommendationPreferences" + }, { "target": "com.amazonaws.computeoptimizer#DescribeRecommendationExportJobs" }, @@ -273,6 +309,9 @@ { "target": "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetrics" }, + { + "target": "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferences" + }, { "target": "com.amazonaws.computeoptimizer#GetEnrollmentStatus" }, @@ -282,28 +321,19 @@ { "target": "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendations" }, + { + "target": "com.amazonaws.computeoptimizer#GetRecommendationPreferences" + }, { "target": "com.amazonaws.computeoptimizer#GetRecommendationSummaries" }, + { + "target": "com.amazonaws.computeoptimizer#PutRecommendationPreferences" + }, { "target": "com.amazonaws.computeoptimizer#UpdateEnrollmentStatus" } - ], - "traits": { - "aws.api#service": { - "sdkId": "Compute Optimizer", - "arnNamespace": "compute-optimizer", - "cloudFormationName": "ComputeOptimizer", - "cloudTrailEventSource": "computeoptimizer.amazonaws.com", - "endpointPrefix": "compute-optimizer" - }, - "aws.auth#sigv4": { - "name": "compute-optimizer" - }, - "aws.protocols#awsJson1_0": {}, - "smithy.api#documentation": "

                                                                      Compute Optimizer is a service that analyzes the configuration and utilization\n metrics of your Amazon Web Services compute resources, such as Amazon EC2\n instances, Amazon EC2 Auto Scaling groups, Lambda functions, and Amazon EBS volumes. It reports whether your resources are optimal, and generates\n optimization recommendations to reduce the cost and improve the performance of your\n workloads. Compute Optimizer also provides recent utilization metric data, in addition\n to projected utilization metric data for the recommendations, which you can use to\n evaluate which recommendation provides the best price-performance trade-off. The\n analysis of your usage patterns can help you decide when to move or resize your running\n resources, and still meet your performance and capacity requirements. For more\n information about Compute Optimizer, including the required permissions to use the\n service, see the Compute Optimizer User Guide.

                                                                      ", - "smithy.api#title": "AWS Compute Optimizer" - } + ] }, "com.amazonaws.computeoptimizer#CpuVendorArchitecture": { "type": "string", @@ -329,9 +359,146 @@ "com.amazonaws.computeoptimizer#CreationTimestamp": { "type": "timestamp" }, + "com.amazonaws.computeoptimizer#Currency": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "USD", + "name": "USD" + }, + { + "value": "CNY", + "name": "CNY" + } + ] + } + }, "com.amazonaws.computeoptimizer#CurrentInstanceType": { "type": "string" }, + "com.amazonaws.computeoptimizer#CurrentPerformanceRisk": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "VeryLow", + "name": "VERY_LOW" + }, + { + "value": "Low", + "name": "LOW" + }, + { + "value": "Medium", + "name": "MEDIUM" + }, + { + "value": "High", + "name": "HIGH" + } + ] + } + }, + "com.amazonaws.computeoptimizer#CurrentPerformanceRiskRatings": { + "type": "structure", + "members": { + "high": { + "target": "com.amazonaws.computeoptimizer#High", + "traits": { + "smithy.api#documentation": "

                                                                      A count of the applicable resource types with a high performance risk rating.

                                                                      " + } + }, + "medium": { + "target": "com.amazonaws.computeoptimizer#Medium", + "traits": { + "smithy.api#documentation": "

                                                                      A count of the applicable resource types with a medium performance risk rating.

                                                                      " + } + }, + "low": { + "target": "com.amazonaws.computeoptimizer#Low", + "traits": { + "smithy.api#documentation": "

                                                                      A count of the applicable resource types with a low performance risk rating.

                                                                      " + } + }, + "veryLow": { + "target": "com.amazonaws.computeoptimizer#VeryLow", + "traits": { + "smithy.api#documentation": "

                                                                      A count of the applicable resource types with a very low performance risk\n rating.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Describes the performance risk ratings for a given resource type.

                                                                      \n

                                                                      Resources with a high or medium rating are at risk of not\n meeting the performance needs of their workloads, while resources with a\n low rating are performing well in their workloads.

                                                                      " + } + }, + "com.amazonaws.computeoptimizer#DeleteRecommendationPreferences": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#DeleteRecommendationPreferencesRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#DeleteRecommendationPreferencesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a recommendation preference, such as enhanced infrastructure metrics.

                                                                      \n

                                                                      For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide.

                                                                      " + } + }, + "com.amazonaws.computeoptimizer#DeleteRecommendationPreferencesRequest": { + "type": "structure", + "members": { + "resourceType": { + "target": "com.amazonaws.computeoptimizer#ResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The target resource type of the recommendation preference to delete.

                                                                      \n

                                                                      The Ec2Instance option encompasses standalone instances and instances\n that are part of Auto Scaling groups. The AutoScalingGroup option\n encompasses only instances that are part of an Auto Scaling group.

                                                                      ", + "smithy.api#required": {} + } + }, + "scope": { + "target": "com.amazonaws.computeoptimizer#Scope", + "traits": { + "smithy.api#documentation": "

                                                                      An object that describes the scope of the recommendation preference to delete.

                                                                      \n

                                                                      You can delete recommendation preferences that are created at the organization level\n (for management accounts of an organization only), account level, and resource level.\n For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide.

                                                                      " + } + }, + "recommendationPreferenceNames": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferenceNames", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the recommendation preference to delete.

                                                                      \n

                                                                      Enhanced infrastructure metrics (EnhancedInfrastructureMetrics) is the\n only feature that can be activated through preferences. Therefore, it is also the only\n recommendation preference that can be deleted.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.computeoptimizer#DeleteRecommendationPreferencesResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.computeoptimizer#DescribeRecommendationExportJobs": { "type": "operation", "input": { @@ -367,7 +534,7 @@ } ], "traits": { - "smithy.api#documentation": "

@@ -367,7 +534,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Describes recommendation export jobs created in the last seven days.\n\nUse the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your\n recommendations. Then use the DescribeRecommendationExportJobs action\n to view your export jobs."
+            "smithy.api#documentation": "Describes recommendation export jobs created in the last seven days.\nUse the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your\n recommendations. Then use the DescribeRecommendationExportJobs action\n to view your export jobs."
        }
    },
    "com.amazonaws.computeoptimizer#DescribeRecommendationExportJobsRequest": {
@@ -376,7 +543,7 @@
            "jobIds": {
                "target": "com.amazonaws.computeoptimizer#JobIds",
                "traits": {
-                    "smithy.api#documentation": "The identification numbers of the export jobs to return.\n\nAn export job ID is returned when you create an export using the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions.\n\nAll export jobs created in the last seven days are returned if this parameter is\n omitted."
+                    "smithy.api#documentation": "The identification numbers of the export jobs to return.\nAn export job ID is returned when you create an export using the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions.\nAll export jobs created in the last seven days are returned if this parameter is\n omitted."
                }
            },
            "filters": {
@@ -394,7 +561,7 @@
            "maxResults": {
                "target": "com.amazonaws.computeoptimizer#MaxResults",
                "traits": {
-                    "smithy.api#documentation": "The maximum number of export jobs to return with a single request.\n\nTo retrieve the remaining results, make another request with the returned\n nextToken value."
+                    "smithy.api#documentation": "The maximum number of export jobs to return with a single request.\nTo retrieve the remaining results, make another request with the returned\n nextToken value."
                }
            }
        }
@@ -411,7 +578,7 @@
            "nextToken": {
                "target": "com.amazonaws.computeoptimizer#NextToken",
                "traits": {
-                    "smithy.api#documentation": "The token to use to advance to the next page of export jobs.\n\nThis value is null when there are no more pages of export jobs to return."
+                    "smithy.api#documentation": "The token to use to advance to the next page of export jobs.\nThis value is null when there are no more pages of export jobs to return."
                }
            }
        }

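The jobIds, maxResults, and nextToken members in the hunks above define the paging contract for DescribeRecommendationExportJobs. A hedged TypeScript sketch of walking those pages with the generated command follows; the recommendationExportJobs, jobId, and status response fields are assumptions inferred from the surrounding shape names.

    import {
      ComputeOptimizerClient,
      DescribeRecommendationExportJobsCommand,
    } from "@aws-sdk/client-compute-optimizer";

    const client = new ComputeOptimizerClient({ region: "us-east-1" });

    // List every export job created in the last seven days, one page at a time.
    async function listExportJobs(): Promise<void> {
      let nextToken: string | undefined;
      do {
        const page = await client.send(
          new DescribeRecommendationExportJobsCommand({ maxResults: 50, nextToken })
        );
        for (const job of page.recommendationExportJobs ?? []) {
          console.log(job.jobId, job.status); // field names assumed
        }
        nextToken = page.nextToken; // undefined when there are no more pages
      } while (nextToken);
    }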
@@ -434,18 +601,18 @@
            "name": {
                "target": "com.amazonaws.computeoptimizer#EBSFilterName",
                "traits": {
-                    "smithy.api#documentation": "The name of the filter.\n\nSpecify Finding to return recommendations with a specific finding\n classification (for example, NotOptimized)."
+                    "smithy.api#documentation": "The name of the filter.\nSpecify Finding to return recommendations with a specific finding\n classification (for example, NotOptimized)."
                }
            },
            "values": {
                "target": "com.amazonaws.computeoptimizer#FilterValues",
                "traits": {
-                    "smithy.api#documentation": "The value of the filter.\n\nThe valid values are Optimized, or NotOptimized."
+                    "smithy.api#documentation": "The value of the filter.\nThe valid values are Optimized, or NotOptimized."
                }
            }
        },
        "traits": {
-            "smithy.api#documentation": "Describes a filter that returns a more specific list of Amazon Elastic Block Store\n (Amazon EBS) volume recommendations. Use this filter with the GetEBSVolumeRecommendations action.\n\nYou can use LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, JobFilter with the\n DescribeRecommendationExportJobs action, and Filter\n with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions."
+            "smithy.api#documentation": "Describes a filter that returns a more specific list of Amazon Elastic Block Store\n (Amazon EBS) volume recommendations. Use this filter with the GetEBSVolumeRecommendations action.\nYou can use LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, JobFilter with the\n DescribeRecommendationExportJobs action, and Filter\n with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions."
        }
    },
    "com.amazonaws.computeoptimizer#EBSFilterName": {
@@ -509,13 +676,13 @@
            "name": {
                "target": "com.amazonaws.computeoptimizer#EBSMetricName",
                "traits": {
-                    "smithy.api#documentation": "The name of the utilization metric.\n\nThe following utilization metrics are available:\n VolumeReadOpsPerSecond - The completed read operations per second\n from the volume in a specified period of time.\n Unit: Count\n VolumeWriteOpsPerSecond - The completed write operations per\n second to the volume in a specified period of time.\n Unit: Count\n VolumeReadBytesPerSecond - The bytes read per second from the\n volume in a specified period of time.\n Unit: Bytes\n VolumeWriteBytesPerSecond - The bytes written to the volume in a\n specified period of time.\n Unit: Bytes"
+                    "smithy.api#documentation": "The name of the utilization metric.\nThe following utilization metrics are available:\n VolumeReadOpsPerSecond - The completed read operations per second\n from the volume in a specified period of time.\n Unit: Count\n VolumeWriteOpsPerSecond - The completed write operations per\n second to the volume in a specified period of time.\n Unit: Count\n VolumeReadBytesPerSecond - The bytes read per second from the\n volume in a specified period of time.\n Unit: Bytes\n VolumeWriteBytesPerSecond - The bytes written to the volume in a\n specified period of time.\n Unit: Bytes"
                }
            },
            "statistic": {
                "target": "com.amazonaws.computeoptimizer#MetricStatistic",
                "traits": {
-                    "smithy.api#documentation": "The statistic of the utilization metric.\n\nThe Compute Optimizer API, Command Line Interface (CLI), and SDKs\n return utilization metrics using only the Maximum statistic, which is the\n highest value observed during the specified period.\n\nThe Compute Optimizer console displays graphs for some utilization metrics using the\n Average statistic, which is the value of Sum /\n SampleCount during the specified period. For more information, see\n Viewing resource\n recommendations in the Compute Optimizer User\n Guide. You can also get averaged utilization metric data for your resources\n using Amazon CloudWatch. For more information, see the Amazon CloudWatch\n User Guide."
+                    "smithy.api#documentation": "The statistic of the utilization metric.\nThe Compute Optimizer API, Command Line Interface (CLI), and SDKs\n return utilization metrics using only the Maximum statistic, which is the\n highest value observed during the specified period.\nThe Compute Optimizer console displays graphs for some utilization metrics using the\n Average statistic, which is the value of Sum /\n SampleCount during the specified period. For more information, see\n Viewing resource\n recommendations in the Compute Optimizer User\n Guide. You can also get averaged utilization metric data for your resources\n using Amazon CloudWatch. For more information, see the Amazon CloudWatch\n User Guide."
                }
            },
            "value": {
@@ -526,7 +693,7 @@
            }
        },
        "traits": {
-            "smithy.api#documentation": "Describes a utilization metric of an Amazon Elastic Block Store (Amazon EBS)\n volume.\n\nCompare the utilization metric data of your resource against its projected utilization\n metric data to determine the performance difference between your current resource and\n the recommended option."
+            "smithy.api#documentation": "Describes a utilization metric of an Amazon Elastic Block Store (Amazon EBS)\n volume.\nCompare the utilization metric data of your resource against its projected utilization\n metric data to determine the performance difference between your current resource and\n the recommended option."
        }
    },
    "com.amazonaws.computeoptimizer#EBSUtilizationMetrics": {
@@ -535,19 +702,54 @@
            "target": "com.amazonaws.computeoptimizer#EBSUtilizationMetric"
        }
    },
+    "com.amazonaws.computeoptimizer#EffectiveRecommendationPreferences": {
+        "type": "structure",
+        "members": {
+            "cpuVendorArchitectures": {
+                "target": "com.amazonaws.computeoptimizer#CpuVendorArchitectures",
+                "traits": {
+                    "smithy.api#documentation": "Describes the CPU vendor and architecture for an instance or Auto Scaling group\n recommendations.\nFor example, when you specify AWS_ARM64 with:\n "
+                }
+            },
+            "enhancedInfrastructureMetrics": {
+                "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics",
+                "traits": {
+                    "smithy.api#documentation": "Describes the activation status of the enhanced infrastructure metrics\n preference.\nA status of Active confirms that the preference is applied in the latest\n recommendation refresh, and a status of Inactive confirms that it's not yet\n applied."
+                }
+            }
+        },
+        "traits": {
+            "smithy.api#documentation": "Describes the effective recommendation preferences for a resource."
+        }
+    },
+    "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics": {
+        "type": "string",
+        "traits": {
+            "smithy.api#enum": [
+                {
+                    "value": "Active",
+                    "name": "ACTIVE"
+                },
+                {
+                    "value": "Inactive",
+                    "name": "INACTIVE"
+                }
+            ]
+        }
+    },

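The EffectiveRecommendationPreferences structure and the EnhancedInfrastructureMetrics enum added above are surfaced by the new GetEffectiveRecommendationPreferencesCommand. A hedged TypeScript sketch follows; the resourceArn input member is an assumption based on the command's purpose and is not shown in these hunks.

    import {
      ComputeOptimizerClient,
      GetEffectiveRecommendationPreferencesCommand,
    } from "@aws-sdk/client-compute-optimizer";

    const client = new ComputeOptimizerClient({ region: "us-east-1" });

    // Report whether enhanced infrastructure metrics are applied for a resource.
    async function checkEnhancedMetrics(resourceArn: string): Promise<void> {
      const prefs = await client.send(
        new GetEffectiveRecommendationPreferencesCommand({ resourceArn }) // member name assumed
      );
      // Per the model above, "Active" means the preference is applied in the
      // latest recommendation refresh; "Inactive" means it is not yet applied.
      console.log(prefs.enhancedInfrastructureMetrics);
    }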
    "com.amazonaws.computeoptimizer#EnrollmentFilter": {
        "type": "structure",
        "members": {
            "name": {
                "target": "com.amazonaws.computeoptimizer#EnrollmentFilterName",
                "traits": {
-                    "smithy.api#documentation": "The name of the filter.\n\nSpecify Status to return accounts with a specific enrollment status (for\n example, Active)."
+                    "smithy.api#documentation": "The name of the filter.\nSpecify Status to return accounts with a specific enrollment status (for\n example, Active)."
                }
            },
            "values": {
                "target": "com.amazonaws.computeoptimizer#FilterValues",
                "traits": {
-                    "smithy.api#documentation": "The value of the filter.\n\nThe valid values are Active, Inactive, Pending,\n and Failed."
+                    "smithy.api#documentation": "The value of the filter.\nThe valid values are Active, Inactive, Pending,\n and Failed."
                }
            }
        },
@@ -575,6 +777,26 @@
    "com.amazonaws.computeoptimizer#ErrorMessage": {
        "type": "string"
    },
+    "com.amazonaws.computeoptimizer#EstimatedMonthlySavings": {
+        "type": "structure",
+        "members": {
+            "currency": {
+                "target": "com.amazonaws.computeoptimizer#Currency",
+                "traits": {
+                    "smithy.api#documentation": "The currency of the estimated monthly\n savings."
+                }
+            },
+            "value": {
+                "target": "com.amazonaws.computeoptimizer#Value",
+                "traits": {
+                    "smithy.api#documentation": "The value of the estimated monthly savings."
+                }
+            }
+        },
+        "traits": {
+            "smithy.api#documentation": "Describes the estimated monthly savings amount possible for a given resource based on\n On-Demand instance pricing\nFor more information, see Estimated monthly savings and savings opportunities in the\n Compute Optimizer User Guide."
+        }
+    },
    "com.amazonaws.computeoptimizer#ExportAutoScalingGroupRecommendations": {
        "type": "operation",
        "input": {

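The export operations documented below all follow the same job-based flow: start an export into an existing S3 bucket, then poll DescribeRecommendationExportJobs with the returned job ID. A hedged TypeScript sketch of that flow with the Auto Scaling group variant follows; the bucket and keyPrefix member names of s3DestinationConfig are assumptions here, since that shape is not part of these hunks.

    import {
      ComputeOptimizerClient,
      ExportAutoScalingGroupRecommendationsCommand,
      DescribeRecommendationExportJobsCommand,
    } from "@aws-sdk/client-compute-optimizer";

    const client = new ComputeOptimizerClient({ region: "us-east-1" });

    async function exportAsgRecommendations(): Promise<void> {
      // The destination bucket must already exist and its policy must let
      // Compute Optimizer write to it, as the s3DestinationConfig docs note.
      const { jobId } = await client.send(
        new ExportAutoScalingGroupRecommendationsCommand({
          s3DestinationConfig: { bucket: "my-export-bucket", keyPrefix: "compute-optimizer/" }, // member names assumed
          fileFormat: "Csv", // the only format the model documents
        })
      );

      // Only one Auto Scaling group export job runs per Region; check it by ID.
      const { recommendationExportJobs } = await client.send(
        new DescribeRecommendationExportJobsCommand({ jobIds: jobId ? [jobId] : [] })
      );
      console.log(recommendationExportJobs?.[0]?.status); // field name assumed
    }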
@@ -610,7 +832,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Exports optimization recommendations for Auto Scaling groups.\n\nRecommendations are exported in a comma-separated values (.csv) file, and its metadata\n in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting\n Recommendations in the Compute Optimizer User\n Guide.\n\nYou can have only one Auto Scaling group export job in progress per Amazon Web Services Region."
+            "smithy.api#documentation": "Exports optimization recommendations for Auto Scaling groups.\nRecommendations are exported in a comma-separated values (.csv) file, and its metadata\n in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting\n Recommendations in the Compute Optimizer User\n Guide.\nYou can have only one Auto Scaling group export job in progress per Amazon Web Services Region."
        }
    },
    "com.amazonaws.computeoptimizer#ExportAutoScalingGroupRecommendationsRequest": {
@@ -619,7 +841,7 @@
            "accountIds": {
                "target": "com.amazonaws.computeoptimizer#AccountIds",
                "traits": {
-                    "smithy.api#documentation": "The IDs of the Amazon Web Services accounts for which to export Auto Scaling group\n recommendations.\n\nIf your account is the management account of an organization, use this parameter to\n specify the member account for which you want to export recommendations.\n\nThis parameter cannot be specified together with the include member accounts\n parameter. The parameters are mutually exclusive.\n\nRecommendations for member accounts are not included in the export if this parameter,\n or the include member accounts parameter, is omitted.\n\nYou can specify multiple account IDs per request."
+                    "smithy.api#documentation": "The IDs of the Amazon Web Services accounts for which to export Auto Scaling group\n recommendations.\nIf your account is the management account of an organization, use this parameter to\n specify the member account for which you want to export recommendations.\nThis parameter cannot be specified together with the include member accounts\n parameter. The parameters are mutually exclusive.\nRecommendations for member accounts are not included in the export if this parameter,\n or the include member accounts parameter, is omitted.\nYou can specify multiple account IDs per request."
                }
            },
            "filters": {
@@ -637,20 +859,20 @@
            "s3DestinationConfig": {
                "target": "com.amazonaws.computeoptimizer#S3DestinationConfig",
                "traits": {
-                    "smithy.api#documentation": "An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket\n name and key prefix for the export job.\n\nYou must create the destination Amazon S3 bucket for your recommendations\n export before you create the export job. Compute Optimizer does not create the S3 bucket\n for you. After you create the S3 bucket, ensure that it has the required permissions\n policy to allow Compute Optimizer to write the export file to it. If you plan to\n specify an object prefix when you create the export job, you must include the object\n prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the\n Compute Optimizer User Guide.",
+                    "smithy.api#documentation": "An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket\n name and key prefix for the export job.\nYou must create the destination Amazon S3 bucket for your recommendations\n export before you create the export job. Compute Optimizer does not create the S3 bucket\n for you. After you create the S3 bucket, ensure that it has the required permissions\n policy to allow Compute Optimizer to write the export file to it. If you plan to specify\n an object prefix when you create the export job, you must include the object prefix in\n the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the\n Compute Optimizer User Guide.",
                    "smithy.api#required": {}
                }
            },
            "fileFormat": {
                "target": "com.amazonaws.computeoptimizer#FileFormat",
                "traits": {
-                    "smithy.api#documentation": "The format of the export file.\n\nThe only export file format currently supported is Csv."
+                    "smithy.api#documentation": "The format of the export file.\nThe only export file format currently supported is Csv."
                }
            },
            "includeMemberAccounts": {
                "target": "com.amazonaws.computeoptimizer#IncludeMemberAccounts",
                "traits": {
-                    "smithy.api#documentation": "Indicates whether to include recommendations for resources in all member accounts of\n the organization if your account is the management account of an organization.\n\nThe member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.\n\nRecommendations for member accounts of the organization are not included in the export\n file if this parameter is omitted.\n\nThis parameter cannot be specified together with the account IDs parameter. The\n parameters are mutually exclusive.\n\nRecommendations for member accounts are not included in the export if this parameter,\n or the account IDs parameter, is omitted."
+                    "smithy.api#documentation": "Indicates whether to include recommendations for resources in all member accounts of\n the organization if your account is the management account of an organization.\nThe member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.\nRecommendations for member accounts of the organization are not included in the export\n file if this parameter is omitted.\nThis parameter cannot be specified together with the account IDs parameter. The\n parameters are mutually exclusive.\nRecommendations for member accounts are not included in the export if this parameter,\n or the account IDs parameter, is omitted."
                }
            },
            "recommendationPreferences": {
@@ -667,7 +889,7 @@
            "jobId": {
                "target": "com.amazonaws.computeoptimizer#JobId",
                "traits": {
-                    "smithy.api#documentation": "The identification number of the export job.\n\nUse the DescribeRecommendationExportJobs action, and specify the job\n ID to view the status of an export job."
+                    "smithy.api#documentation": "The identification number of the export job.\nUse the DescribeRecommendationExportJobs action, and specify the job\n ID to view the status of an export job."
                }
            },
            "s3Destination": {
@@ -727,7 +949,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Exports optimization recommendations for Amazon EBS volumes.\n\nRecommendations are exported in a comma-separated values (.csv) file, and its metadata\n in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see\n Exporting\n Recommendations in the Compute Optimizer User\n Guide.\n\nYou can have only one Amazon EBS volume export job in progress per Amazon Web Services Region."
+            "smithy.api#documentation": "Exports optimization recommendations for Amazon EBS volumes.\nRecommendations are exported in a comma-separated values (.csv) file, and its metadata\n in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting\n Recommendations in the Compute Optimizer User\n Guide.\nYou can have only one Amazon EBS volume export job in progress per Amazon Web Services Region."
        }
    },
    "com.amazonaws.computeoptimizer#ExportEBSVolumeRecommendationsRequest": {
@@ -736,7 +958,7 @@
            "accountIds": {
                "target": "com.amazonaws.computeoptimizer#AccountIds",
                "traits": {
-                    "smithy.api#documentation": "The IDs of the Amazon Web Services accounts for which to export Amazon EBS\n volume recommendations.\n\nIf your account is the management account of an organization, use this parameter to\n specify the member account for which you want to export recommendations.\n\nThis parameter cannot be specified together with the include member accounts\n parameter. The parameters are mutually exclusive.\n\nRecommendations for member accounts are not included in the export if this parameter,\n or the include member accounts parameter, is omitted.\n\nYou can specify multiple account IDs per request."
+                    "smithy.api#documentation": "The IDs of the Amazon Web Services accounts for which to export Amazon EBS\n volume recommendations.\nIf your account is the management account of an organization, use this parameter to\n specify the member account for which you want to export recommendations.\nThis parameter cannot be specified together with the include member accounts\n parameter. The parameters are mutually exclusive.\nRecommendations for member accounts are not included in the export if this parameter,\n or the include member accounts parameter, is omitted.\nYou can specify multiple account IDs per request."
                }
            },
            "filters": {
@@ -760,13 +982,13 @@
            "fileFormat": {
                "target": "com.amazonaws.computeoptimizer#FileFormat",
                "traits": {
-                    "smithy.api#documentation": "The format of the export file.\n\nThe only export file format currently supported is Csv."
+                    "smithy.api#documentation": "The format of the export file.\nThe only export file format currently supported is Csv."
                }
            },
            "includeMemberAccounts": {
                "target": "com.amazonaws.computeoptimizer#IncludeMemberAccounts",
                "traits": {
-                    "smithy.api#documentation": "Indicates whether to include recommendations for resources in all member accounts of\n the organization if your account is the management account of an organization.\n\nThe member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.\n\nRecommendations for member accounts of the organization are not included in the export\n file if this parameter is omitted.\n\nThis parameter cannot be specified together with the account IDs parameter. The\n parameters are mutually exclusive.\n\nRecommendations for member accounts are not included in the export if this parameter,\n or the account IDs parameter, is omitted."
+                    "smithy.api#documentation": "Indicates whether to include recommendations for resources in all member accounts of\n the organization if your account is the management account of an organization.\nThe member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.\nRecommendations for member accounts of the organization are not included in the export\n file if this parameter is omitted.\nThis parameter cannot be specified together with the account IDs parameter. The\n parameters are mutually exclusive.\nRecommendations for member accounts are not included in the export if this parameter,\n or the account IDs parameter, is omitted."
                }
            }
        }
@@ -777,7 +999,7 @@
            "jobId": {
                "target": "com.amazonaws.computeoptimizer#JobId",
                "traits": {
-                    "smithy.api#documentation": "The identification number of the export job.\n\nUse the DescribeRecommendationExportJobs action, and specify the job\n ID to view the status of an export job."
+                    "smithy.api#documentation": "The identification number of the export job.\nUse the DescribeRecommendationExportJobs action, and specify the job\n ID to view the status of an export job."
                }
            },
            "s3Destination": {
@@ -820,7 +1042,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Exports optimization recommendations for Amazon EC2 instances.\n\nRecommendations are exported in a comma-separated values (.csv) file, and its metadata\n in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see\n Exporting\n Recommendations in the Compute Optimizer User\n Guide.\n\nYou can have only one Amazon EC2 instance export job in progress per Amazon Web Services Region."
+            "smithy.api#documentation": "Exports optimization recommendations for Amazon EC2 instances.\nRecommendations are exported in a comma-separated values (.csv) file, and its metadata\n in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting\n Recommendations in the Compute Optimizer User\n Guide.\nYou can have only one Amazon EC2 instance export job in progress per Amazon Web Services Region."
        }
    },
    "com.amazonaws.computeoptimizer#ExportEC2InstanceRecommendationsRequest": {
@@ -829,7 +1051,7 @@
            "accountIds": {
                "target": "com.amazonaws.computeoptimizer#AccountIds",
                "traits": {
-                    "smithy.api#documentation": "The IDs of the Amazon Web Services accounts for which to export instance\n recommendations.\n\nIf your account is the management account of an organization, use this parameter to\n specify the member account for which you want to export recommendations.\n\nThis parameter cannot be specified together with the include member accounts\n parameter. The parameters are mutually exclusive.\n\nRecommendations for member accounts are not included in the export if this parameter,\n or the include member accounts parameter, is omitted.\n\nYou can specify multiple account IDs per request."
+                    "smithy.api#documentation": "The IDs of the Amazon Web Services accounts for which to export instance\n recommendations.\nIf your account is the management account of an organization, use this parameter to\n specify the member account for which you want to export recommendations.\nThis parameter cannot be specified together with the include member accounts\n parameter. The parameters are mutually exclusive.\nRecommendations for member accounts are not included in the export if this parameter,\n or the include member accounts parameter, is omitted.\nYou can specify multiple account IDs per request."
                }
            },
            "filters": {
@@ -847,20 +1069,20 @@
            "s3DestinationConfig": {
                "target": "com.amazonaws.computeoptimizer#S3DestinationConfig",
                "traits": {
-                    "smithy.api#documentation": "An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket\n name and key prefix for the export job.\n\nYou must create the destination Amazon S3 bucket for your recommendations\n export before you create the export job. Compute Optimizer does not create the S3 bucket\n for you. After you create the S3 bucket, ensure that it has the required permissions\n policy policy to allow Compute Optimizer to write the export file to it. If you plan to\n specify an object prefix when you create the export job, you must include the object\n prefix in the that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the\n Compute Optimizer User Guide.",
+                    "smithy.api#documentation": "An object to specify the destination Amazon Simple Storage Service (Amazon S3) bucket\n name and key prefix for the export job.\nYou must create the destination Amazon S3 bucket for your recommendations\n export before you create the export job. Compute Optimizer does not create the S3 bucket\n for you. After you create the S3 bucket, ensure that it has the required permissions\n policy to allow Compute Optimizer to write the export file to it.\n If you plan to\n specify an object prefix when you create the export job, you must include the object\n prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the\n Compute Optimizer User Guide.",
                    "smithy.api#required": {}
                }
            },
            "fileFormat": {
                "target": "com.amazonaws.computeoptimizer#FileFormat",
                "traits": {
-                    "smithy.api#documentation": "The format of the export file.\n\nThe only export file format currently supported is Csv."
+                    "smithy.api#documentation": "The format of the export file.\nThe only export file format currently supported is Csv."
                }
            },
            "includeMemberAccounts": {
                "target": "com.amazonaws.computeoptimizer#IncludeMemberAccounts",
                "traits": {
-                    "smithy.api#documentation": "Indicates whether to include recommendations for resources in all member accounts of\n the organization if your account is the management account of an organization.\n\nThe member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.\n\nRecommendations for member accounts of the organization are not included in the export\n file if this parameter is omitted.\n\nRecommendations for member accounts are not included in the export if this parameter,\n or the account IDs parameter, is omitted."
+                    "smithy.api#documentation": "Indicates whether to include recommendations for resources in all member accounts of\n the organization if your account is the management account of an organization.\nThe member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.\nRecommendations for member accounts of the organization are not included in the export\n file if this parameter is omitted.\nRecommendations for member accounts are not included in the export if this parameter,\n or the account IDs parameter, is omitted."
                }
            },
            "recommendationPreferences": {
@@ -877,7 +1099,7 @@
            "jobId": {
                "target": "com.amazonaws.computeoptimizer#JobId",
                "traits": {
-                    "smithy.api#documentation": "The identification number of the export job.\n\nUse the DescribeRecommendationExportJobs action, and specify the job\n ID to view the status of an export job."
+                    "smithy.api#documentation": "The identification number of the export job.\nUse the DescribeRecommendationExportJobs action, and specify the job\n ID to view the status of an export job."
                }
            },
            "s3Destination": {
@@ -923,7 +1145,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Exports optimization recommendations for Lambda functions.\n\nRecommendations are exported in a comma-separated values (.csv) file, and its metadata\n in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see\n Exporting\n Recommendations in the Compute Optimizer User\n Guide.\n\nYou can have only one Lambda function export job in progress per Amazon Web Services Region."
+            "smithy.api#documentation": "Exports optimization recommendations for Lambda functions.\nRecommendations are exported in a comma-separated values (.csv) file, and its metadata\n in a JavaScript Object Notation (JSON) (.json) file, to an existing Amazon Simple Storage Service (Amazon S3) bucket that you specify. For more information, see Exporting\n Recommendations in the Compute Optimizer User\n Guide.\nYou can have only one Lambda function export job in progress per Amazon Web Services Region."
        }
    },
    "com.amazonaws.computeoptimizer#ExportLambdaFunctionRecommendationsRequest": {
@@ -932,7 +1154,7 @@
            "accountIds": {
                "target": "com.amazonaws.computeoptimizer#AccountIds",
                "traits": {
-                    "smithy.api#documentation": "The IDs of the Amazon Web Services accounts for which to export Lambda\n function recommendations.\n\nIf your account is the management account of an organization, use this parameter to\n specify the member account for which you want to export recommendations.\n\nThis parameter cannot be specified together with the include member accounts\n parameter. The parameters are mutually exclusive.\n\nRecommendations for member accounts are not included in the export if this parameter,\n or the include member accounts parameter, is omitted.\n\nYou can specify multiple account IDs per request.

                                                                      " + "smithy.api#documentation": "

                                                                      The IDs of the Amazon Web Services accounts for which to export Lambda\n function recommendations.

                                                                      \n

                                                                      If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to export recommendations.

                                                                      \n

                                                                      This parameter cannot be specified together with the include member accounts\n parameter. The parameters are mutually exclusive.

                                                                      \n

                                                                      Recommendations for member accounts are not included in the export if this parameter,\n or the include member accounts parameter, is omitted.

                                                                      \n

                                                                      You can specify multiple account IDs per request.

                                                                      " } }, "filters": { @@ -956,13 +1178,13 @@ "fileFormat": { "target": "com.amazonaws.computeoptimizer#FileFormat", "traits": { - "smithy.api#documentation": "

                                                                      The format of the export file.

                                                                      \n\n

                                                                      The only export file format currently supported is Csv.

                                                                      " + "smithy.api#documentation": "

                                                                      The format of the export file.

                                                                      \n

                                                                      The only export file format currently supported is Csv.

                                                                      " } }, "includeMemberAccounts": { "target": "com.amazonaws.computeoptimizer#IncludeMemberAccounts", "traits": { - "smithy.api#documentation": "

                                                                      Indicates whether to include recommendations for resources in all member accounts of\n the organization if your account is the management account of an organization.

                                                                      \n\n

                                                                      The member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.

                                                                      \n\n

                                                                      Recommendations for member accounts of the organization are not included in the export\n file if this parameter is omitted.

                                                                      \n\n

                                                                      This parameter cannot be specified together with the account IDs parameter. The\n parameters are mutually exclusive.

                                                                      \n\n

                                                                      Recommendations for member accounts are not included in the export if this parameter,\n or the account IDs parameter, is omitted.

                                                                      " + "smithy.api#documentation": "

                                                                      Indicates whether to include recommendations for resources in all member accounts of\n the organization if your account is the management account of an organization.

                                                                      \n

                                                                      The member accounts must also be opted in to Compute Optimizer, and trusted access for\n Compute Optimizer must be enabled in the organization account. For more information,\n see Compute Optimizer and Amazon Web Services Organizations trusted access in the\n Compute Optimizer User Guide.

                                                                      \n

                                                                      Recommendations for member accounts of the organization are not included in the export\n file if this parameter is omitted.

                                                                      \n

                                                                      This parameter cannot be specified together with the account IDs parameter. The\n parameters are mutually exclusive.

                                                                      \n

                                                                      Recommendations for member accounts are not included in the export if this parameter,\n or the account IDs parameter, is omitted.

                                                                      " } } } @@ -973,7 +1195,7 @@ "jobId": { "target": "com.amazonaws.computeoptimizer#JobId", "traits": { - "smithy.api#documentation": "

                                                                      The identification number of the export job.

                                                                      \n\n

                                                                      Use the DescribeRecommendationExportJobs action, and specify the job\n ID to view the status of an export job.

                                                                      " + "smithy.api#documentation": "

                                                                      The identification number of the export job.

                                                                      \n

                                                                      Use the DescribeRecommendationExportJobs action, and specify the job\n ID to view the status of an export job.
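For reference, a minimal TypeScript sketch of how the members documented above fit together from this client (accountIds vs. includeMemberAccounts, fileFormat, and the returned jobId). The s3DestinationConfig bucket and key prefix are placeholder assumptions, not values taken from this patch.

```ts
import {
  ComputeOptimizerClient,
  DescribeRecommendationExportJobsCommand,
  ExportLambdaFunctionRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function main(): Promise<void> {
  // Either list member accounts via accountIds, or set includeMemberAccounts from
  // the management account; the model documents the two as mutually exclusive.
  const { jobId } = await client.send(
    new ExportLambdaFunctionRecommendationsCommand({
      includeMemberAccounts: true,
      fileFormat: "Csv", // the only export file format currently supported
      s3DestinationConfig: {
        bucket: "example-compute-optimizer-exports", // placeholder bucket (must already exist)
        keyPrefix: "lambda/",
      },
    })
  );

  // The returned jobId feeds DescribeRecommendationExportJobs to check export status.
  const jobs = await client.send(
    new DescribeRecommendationExportJobsCommand({ jobIds: [jobId!] })
  );
  console.log(jobs.recommendationExportJobs?.[0]?.status);
}

main().catch(console.error);
```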

                                                                      " } }, "s3Destination": { @@ -1164,6 +1386,30 @@ { "value": "LastRefreshTimestamp", "name": "LAST_REFRESH_TIMESTAMP" + }, + { + "value": "CurrentPerformanceRisk", + "name": "CURRENT_PERFORMANCE_RISK" + }, + { + "value": "RecommendationOptionsSavingsOpportunityPercentage", + "name": "RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE" + }, + { + "value": "RecommendationOptionsEstimatedMonthlySavingsCurrency", + "name": "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY" + }, + { + "value": "RecommendationOptionsEstimatedMonthlySavingsValue", + "name": "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE" + }, + { + "value": "EffectiveRecommendationPreferencesCpuVendorArchitectures", + "name": "EFFECTIVE_RECOMMENDATION_PREFERENCES_CPU_VENDOR_ARCHITECTURES" + }, + { + "value": "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics", + "name": "EFFECTIVE_RECOMMENDATION_PREFERENCES_ENHANCED_INFRASTRUCTURE_METRICS" } ] } @@ -1349,6 +1595,30 @@ { "value": "LastRefreshTimestamp", "name": "LAST_REFRESH_TIMESTAMP" + }, + { + "value": "CurrentPerformanceRisk", + "name": "CURRENT_PERFORMANCE_RISK" + }, + { + "value": "RecommendationOptionsSavingsOpportunityPercentage", + "name": "RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE" + }, + { + "value": "RecommendationOptionsEstimatedMonthlySavingsCurrency", + "name": "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY" + }, + { + "value": "RecommendationOptionsEstimatedMonthlySavingsValue", + "name": "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE" + }, + { + "value": "EffectiveRecommendationPreferencesCpuVendorArchitectures", + "name": "EFFECTIVE_RECOMMENDATION_PREFERENCES_CPU_VENDOR_ARCHITECTURES" + }, + { + "value": "EffectiveRecommendationPreferencesEnhancedInfrastructureMetrics", + "name": "EFFECTIVE_RECOMMENDATION_PREFERENCES_ENHANCED_INFRASTRUCTURE_METRICS" } ] } @@ -1450,6 +1720,22 @@ { "value": "LastRefreshTimestamp", "name": "LAST_REFRESH_TIMESTAMP" + }, + { + "value": "CurrentPerformanceRisk", + "name": "CURRENT_PERFORMANCE_RISK" + }, + { + "value": "RecommendationOptionsSavingsOpportunityPercentage", + "name": "RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE" + }, + { + "value": "RecommendationOptionsEstimatedMonthlySavingsCurrency", + "name": "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY" + }, + { + "value": "RecommendationOptionsEstimatedMonthlySavingsValue", + "name": "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE" } ] } @@ -1559,6 +1845,22 @@ { "value": "LastRefreshTimestamp", "name": "LAST_REFRESH_TIMESTAMP" + }, + { + "value": "CurrentPerformanceRisk", + "name": "CURRENT_PERFORMANCE_RISK" + }, + { + "value": "RecommendationOptionsSavingsOpportunityPercentage", + "name": "RECOMMENDATION_OPTIONS_SAVINGS_OPPORTUNITY_PERCENTAGE" + }, + { + "value": "RecommendationOptionsEstimatedMonthlySavingsCurrency", + "name": "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_CURRENCY" + }, + { + "value": "RecommendationOptionsEstimatedMonthlySavingsValue", + "name": "RECOMMENDATION_OPTIONS_ESTIMATED_MONTHLY_SAVINGS_VALUE" } ] } @@ -1589,18 +1891,18 @@ "name": { "target": "com.amazonaws.computeoptimizer#FilterName", "traits": { - "smithy.api#documentation": "

-            "smithy.api#documentation": "The name of the filter.\n\n Specify Finding to return recommendations with a specific finding classification (for example, Underprovisioned).\n\n Specify RecommendationSourceType to return recommendations of a specific resource type (for example, Ec2Instance).\n\n Specify FindingReasonCodes to return recommendations with a specific finding reason code (for example, CPUUnderprovisioned)."
+            "smithy.api#documentation": "The name of the filter.\n Specify Finding to return recommendations with a specific finding classification (for example, Underprovisioned).\n Specify RecommendationSourceType to return recommendations of a specific resource type (for example, Ec2Instance).\n Specify FindingReasonCodes to return recommendations with a specific finding reason code (for example, CPUUnderprovisioned)."
                                                                      " } }, "values": { "target": "com.amazonaws.computeoptimizer#FilterValues", "traits": { - "smithy.api#documentation": "

                                                                      The value of the filter.

                                                                      \n\n

                                                                      The valid values for this parameter are as follows, depending on what you specify for\n the name parameter and the resource type that you wish to filter results\n for:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        Specify Optimized or NotOptimized if you specify the\n name parameter as Finding and you want to filter\n results for Auto Scaling groups.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify Underprovisioned, Overprovisioned, or\n Optimized if you specify the name parameter as\n Finding and you want to filter results for EC2\n instances.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify Ec2Instance or AutoScalingGroup if you\n specify the name parameter as\n RecommendationSourceType.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify one of the following options if you specify the name\n parameter as FindingReasonCodes:

                                                                        \n\n
                                                                          \n
                                                                        • \n

                                                                          \n \n CPUOverprovisioned\n — The\n instance’s CPU configuration can be sized down while still meeting the\n performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n CPUUnderprovisioned\n —\n The instance’s CPU configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better CPU performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n MemoryOverprovisioned\n —\n The instance’s memory configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n MemoryUnderprovisioned\n —\n The instance’s memory configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better memory performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n EBSThroughputOverprovisioned\n — The\n instance’s EBS throughput configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n EBSThroughputUnderprovisioned\n — The\n instance’s EBS throughput configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better EBS throughput performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n EBSIOPSOverprovisioned\n —\n The instance’s EBS IOPS configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n EBSIOPSUnderprovisioned\n \n — The instance’s EBS IOPS configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better EBS IOPS performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n NetworkBandwidthOverprovisioned\n — The\n instance’s network bandwidth configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n NetworkBandwidthUnderprovisioned\n — The\n instance’s network bandwidth configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better network bandwidth performance. This finding reason\n happens when the NetworkIn or NetworkOut\n performance of an instance is impacted.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n NetworkPPSOverprovisioned\n — The instance’s\n network PPS (packets per second) configuration can be sized down while\n still meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n NetworkPPSUnderprovisioned\n — The instance’s\n network PPS (packets per second) configuration doesn't meet the\n performance requirements of your workload and there is an alternative\n instance type that provides better network PPS performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n DiskIOPSOverprovisioned\n \n — The instance’s disk IOPS configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n DiskIOPSUnderprovisioned\n \n — The instance’s disk IOPS configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better disk IOPS performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n DiskThroughputOverprovisioned\n — The\n instance’s disk throughput configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n DiskThroughputUnderprovisioned\n — The\n instance’s disk throughput configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better disk throughput performance.

                                                                          \n
                                                                        • \n
                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The value of the filter.

                                                                      \n

                                                                      The valid values for this parameter are as follows, depending on what you specify for\n the name parameter and the resource type that you wish to filter results\n for:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Specify Optimized or NotOptimized if you specify the\n name parameter as Finding and you want to filter\n results for Auto Scaling groups.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify Underprovisioned, Overprovisioned, or\n Optimized if you specify the name parameter as\n Finding and you want to filter results for EC2\n instances.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify Ec2Instance or AutoScalingGroup if you\n specify the name parameter as\n RecommendationSourceType.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify one of the following options if you specify the name\n parameter as FindingReasonCodes:

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          \n \n CPUOverprovisioned\n — The\n instance’s CPU configuration can be sized down while still meeting the\n performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n CPUUnderprovisioned\n —\n The instance’s CPU configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better CPU performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n MemoryOverprovisioned\n —\n The instance’s memory configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n MemoryUnderprovisioned\n —\n The instance’s memory configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better memory performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n EBSThroughputOverprovisioned\n — The\n instance’s EBS throughput configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n EBSThroughputUnderprovisioned\n — The\n instance’s EBS throughput configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better EBS throughput performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n EBSIOPSOverprovisioned\n —\n The instance’s EBS IOPS configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n EBSIOPSUnderprovisioned\n \n — The instance’s EBS IOPS configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better EBS IOPS performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n NetworkBandwidthOverprovisioned\n — The\n instance’s network bandwidth configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n NetworkBandwidthUnderprovisioned\n — The\n instance’s network bandwidth configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better network bandwidth performance. This finding reason\n happens when the NetworkIn or NetworkOut\n performance of an instance is impacted.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n NetworkPPSOverprovisioned\n — The instance’s\n network PPS (packets per second) configuration can be sized down while\n still meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n NetworkPPSUnderprovisioned\n — The instance’s\n network PPS (packets per second) configuration doesn't meet the\n performance requirements of your workload and there is an alternative\n instance type that provides better network PPS performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n DiskIOPSOverprovisioned\n \n — The instance’s disk IOPS configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n DiskIOPSUnderprovisioned\n \n — The instance’s disk IOPS configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better disk IOPS performance.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n DiskThroughputOverprovisioned\n — The\n instance’s disk throughput configuration can be sized down while still\n meeting the performance requirements of your workload.

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n \n DiskThroughputUnderprovisioned\n — The\n instance’s disk throughput configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type\n that provides better disk throughput performance.

                                                                          \n
                                                                        • \n
                                                                        \n
                                                                      • \n
                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      Describes a filter that returns a more specific list of recommendations. Use this\n filter with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

                                                                      \n\n

                                                                      You can use EBSFilter with the GetEBSVolumeRecommendations action,\n LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and JobFilter with\n the DescribeRecommendationExportJobs action.

                                                                      " + "smithy.api#documentation": "

                                                                      Describes a filter that returns a more specific list of recommendations. Use this\n filter with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

                                                                      \n

                                                                      You can use EBSFilter with the GetEBSVolumeRecommendations action,\n LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and JobFilter with\n the DescribeRecommendationExportJobs action.
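Because the Filter shape above is the one accepted by GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations, here is a short sketch of building one. The names and values come from the documentation above; everything else is placeholder.

```ts
import {
  ComputeOptimizerClient,
  Filter,
  GetEC2InstanceRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

// Filter names and values taken from the documentation above.
const filters: Filter[] = [
  { name: "Finding", values: ["Underprovisioned"] },
  { name: "FindingReasonCodes", values: ["CPUUnderprovisioned", "MemoryUnderprovisioned"] },
];

const client = new ComputeOptimizerClient({});

async function main(): Promise<void> {
  const out = await client.send(new GetEC2InstanceRecommendationsCommand({ filters }));
  console.log(out.instanceRecommendations?.length ?? 0, "matching instance recommendations");
}

main().catch(console.error);
```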

                                                                      " } }, "com.amazonaws.computeoptimizer#FilterName": { @@ -1722,7 +2024,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns Auto Scaling group recommendations.

                                                                      \n\n

                                                                      Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that\n meet a specific set of requirements. For more information, see the Supported\n resources and requirements in the Compute Optimizer User\n Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns Auto Scaling group recommendations.

                                                                      \n

                                                                      Compute Optimizer generates recommendations for Amazon EC2 Auto Scaling groups that\n meet a specific set of requirements. For more information, see the Supported\n resources and requirements in the Compute Optimizer User\n Guide.

                                                                      " } }, "com.amazonaws.computeoptimizer#GetAutoScalingGroupRecommendationsRequest": { @@ -1731,7 +2033,7 @@ "accountIds": { "target": "com.amazonaws.computeoptimizer#AccountIds", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account for which to return Auto Scaling group\n recommendations.

                                                                      \n\n

                                                                      If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return Auto Scaling group\n recommendations.

                                                                      \n\n

                                                                      Only one account ID can be specified per request.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account for which to return Auto Scaling group\n recommendations.

                                                                      \n

                                                                      If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return Auto Scaling group\n recommendations.

                                                                      \n

                                                                      Only one account ID can be specified per request.

                                                                      " } }, "autoScalingGroupArns": { @@ -1749,7 +2051,7 @@ "maxResults": { "target": "com.amazonaws.computeoptimizer#MaxResults", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of Auto Scaling group recommendations to return with a single\n request.

                                                                      \n\n

                                                                      To retrieve the remaining results, make another request with the returned\n nextToken value.

                                                                      " + "smithy.api#documentation": "

                                                                      The maximum number of Auto Scaling group recommendations to return with a single\n request.

                                                                      \n

                                                                      To retrieve the remaining results, make another request with the returned\n nextToken value.

                                                                      " } }, "filters": { @@ -1772,7 +2074,7 @@ "nextToken": { "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#documentation": "

                                                                      The token to use to advance to the next page of Auto Scaling group\n recommendations.

                                                                      \n\n

                                                                      This value is null when there are no more pages of Auto Scaling group\n recommendations to return.

                                                                      " + "smithy.api#documentation": "

                                                                      The token to use to advance to the next page of Auto Scaling group\n recommendations.

                                                                      \n

                                                                      This value is null when there are no more pages of Auto Scaling group\n recommendations to return.

                                                                      " } }, "autoScalingGroupRecommendations": { @@ -1784,7 +2086,7 @@ "errors": { "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", "traits": { - "smithy.api#documentation": "

                                                                      An array of objects that describe errors of the request.

                                                                      \n\n

                                                                      For example, an error is returned if you request recommendations for an unsupported\n Auto Scaling group.

                                                                      " + "smithy.api#documentation": "

                                                                      An array of objects that describe errors of the request.

                                                                      \n

                                                                      For example, an error is returned if you request recommendations for an unsupported\n Auto Scaling group.
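The maxResults/nextToken pair documented above follows the usual pagination contract. A sketch of walking every page and surfacing the per-resource errors array, with the page size chosen arbitrarily:

```ts
import {
  AutoScalingGroupRecommendation,
  ComputeOptimizerClient,
  GetAutoScalingGroupRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({});

// Collect every page of Auto Scaling group recommendations.
async function listAllAsgRecommendations(): Promise<AutoScalingGroupRecommendation[]> {
  const all: AutoScalingGroupRecommendation[] = [];
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new GetAutoScalingGroupRecommendationsCommand({ maxResults: 100, nextToken })
    );
    all.push(...(page.autoScalingGroupRecommendations ?? []));
    // Partial failures (for example, an unsupported Auto Scaling group) land here.
    page.errors?.forEach((e) => console.warn("skipped:", e));
    nextToken = page.nextToken; // undefined once the last page has been returned
  } while (nextToken);
  return all;
}

listAllAsgRecommendations().then((recs) => console.log(recs.length, "recommendations"));
```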

                                                                      " } } } @@ -1824,7 +2126,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns Amazon Elastic Block Store (Amazon EBS) volume recommendations.

                                                                      \n\n

                                                                      Compute Optimizer generates recommendations for Amazon EBS volumes that\n meet a specific set of requirements. For more information, see the Supported\n resources and requirements in the Compute Optimizer User\n Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns Amazon Elastic Block Store (Amazon EBS) volume recommendations.

                                                                      \n

                                                                      Compute Optimizer generates recommendations for Amazon EBS volumes that\n meet a specific set of requirements. For more information, see the Supported\n resources and requirements in the Compute Optimizer User\n Guide.

                                                                      " } }, "com.amazonaws.computeoptimizer#GetEBSVolumeRecommendationsRequest": { @@ -1845,7 +2147,7 @@ "maxResults": { "target": "com.amazonaws.computeoptimizer#MaxResults", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of volume recommendations to return with a single request.

                                                                      \n\n

                                                                      To retrieve the remaining results, make another request with the returned\n nextToken value.

                                                                      " + "smithy.api#documentation": "

                                                                      The maximum number of volume recommendations to return with a single request.

                                                                      \n

                                                                      To retrieve the remaining results, make another request with the returned\n nextToken value.

                                                                      " } }, "filters": { @@ -1857,7 +2159,7 @@ "accountIds": { "target": "com.amazonaws.computeoptimizer#AccountIds", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account for which to return volume\n recommendations.

                                                                      \n\n

                                                                      If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return volume recommendations.

                                                                      \n\n

                                                                      Only one account ID can be specified per request.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account for which to return volume\n recommendations.

                                                                      \n

                                                                      If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return volume recommendations.

                                                                      \n

                                                                      Only one account ID can be specified per request.

                                                                      " } } } @@ -1868,7 +2170,7 @@ "nextToken": { "target": "com.amazonaws.computeoptimizer#NextToken", "traits": { - "smithy.api#documentation": "

                                                                      The token to use to advance to the next page of volume recommendations.

                                                                      \n\n

                                                                      This value is null when there are no more pages of volume recommendations to\n return.

                                                                      " + "smithy.api#documentation": "

                                                                      The token to use to advance to the next page of volume recommendations.

                                                                      \n

                                                                      This value is null when there are no more pages of volume recommendations to\n return.

                                                                      " } }, "volumeRecommendations": { @@ -1880,7 +2182,7 @@ "errors": { "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors", "traits": { - "smithy.api#documentation": "

                                                                      An array of objects that describe errors of the request.

                                                                      \n\n

                                                                      For example, an error is returned if you request recommendations for an unsupported\n volume.

                                                                      " + "smithy.api#documentation": "

                                                                      An array of objects that describe errors of the request.

                                                                      \n

                                                                      For example, an error is returned if you request recommendations for an unsupported\n volume.
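As the Filter documentation earlier in this section notes, volume recommendations take an EBSFilter rather than the generic Filter. A hedged sketch follows; the Finding name and NotOptimized value are assumptions about the EBS finding classifications, not values shown in this hunk.

```ts
import {
  ComputeOptimizerClient,
  GetEBSVolumeRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({});

async function main(): Promise<void> {
  const out = await client.send(
    new GetEBSVolumeRecommendationsCommand({
      accountIds: ["111122223333"], // only one account ID can be specified per request
      filters: [{ name: "Finding", values: ["NotOptimized"] }], // assumed EBS finding values
      maxResults: 100,
    })
  );
  out.errors?.forEach((e) => console.warn("skipped:", e)); // e.g. unsupported volumes
  console.log(out.volumeRecommendations?.length ?? 0, "volume recommendations");
}

main().catch(console.error);
```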

                                                                      " } } } @@ -1920,7 +2222,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns Amazon EC2 instance recommendations.

                                                                      \n\n

                                                                      Compute Optimizer generates recommendations for Amazon Elastic Compute Cloud (Amazon EC2) instances that meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns Amazon EC2 instance recommendations.

                                                                      \n

                                                                      Compute Optimizer generates recommendations for Amazon Elastic Compute Cloud (Amazon EC2) instances that meet a specific set of requirements. For more\n information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide.

                                                                      " } }, "com.amazonaws.computeoptimizer#GetEC2InstanceRecommendationsRequest": { @@ -1941,7 +2243,7 @@ "maxResults": { "target": "com.amazonaws.computeoptimizer#MaxResults", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of instance recommendations to return with a single request.

                                                                      \n\n

                                                                      To retrieve the remaining results, make another request with the returned\n nextToken value.

                                                                      " + "smithy.api#documentation": "

The maximum number of instance recommendations to return with a single request.\n To retrieve the remaining results, make another request with the returned\n nextToken value."
                    }
                },
                "filters": {
@@ -1953,7 +2255,7 @@
                "accountIds": {
                    "target": "com.amazonaws.computeoptimizer#AccountIds",
                    "traits": {
-                        "smithy.api#documentation": "The ID of the Amazon Web Services account for which to return instance\n recommendations.\n\n If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return instance recommendations.\n\n Only one account ID can be specified per request."
+                        "smithy.api#documentation": "The ID of the Amazon Web Services account for which to return instance\n recommendations.\n If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return instance recommendations.\n Only one account ID can be specified per request."
                    }
                },
                "recommendationPreferences": {
@@ -1970,7 +2272,7 @@
                "nextToken": {
                    "target": "com.amazonaws.computeoptimizer#NextToken",
                    "traits": {
-                        "smithy.api#documentation": "The token to use to advance to the next page of instance recommendations.\n\n This value is null when there are no more pages of instance recommendations to\n return."
+                        "smithy.api#documentation": "The token to use to advance to the next page of instance recommendations.\n This value is null when there are no more pages of instance recommendations to\n return."
                    }
                },
                "instanceRecommendations": {
@@ -1982,7 +2284,7 @@
                "errors": {
                    "target": "com.amazonaws.computeoptimizer#GetRecommendationErrors",
                    "traits": {
-                        "smithy.api#documentation": "An array of objects that describe errors of the request.\n\n For example, an error is returned if you request recommendations for an instance of an\n unsupported instance family."
+                        "smithy.api#documentation": "An array of objects that describe errors of the request.\n For example, an error is returned if you request recommendations for an instance of an\n unsupported instance family."
                    }
                }
            }

@@ -2022,7 +2324,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Returns the projected utilization metrics of Amazon EC2 instance\n recommendations.\n\n The Cpu and Memory metrics are the only projected\n utilization metrics returned when you run this action. Additionally, the\n Memory metric is returned only for resources that have the unified\n CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent."
+            "smithy.api#documentation": "Returns the projected utilization metrics of Amazon EC2 instance\n recommendations.\n The Cpu and Memory metrics are the only projected\n utilization metrics returned when you run this action. Additionally, the\n Memory metric is returned only for resources that have the unified\n CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent."
        }
    },
    "com.amazonaws.computeoptimizer#GetEC2RecommendationProjectedMetricsRequest": {

@@ -2082,6 +2384,67 @@
                }
            }
        },
+        "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferences": {
+            "type": "operation",
+            "input": {
+                "target": "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesRequest"
+            },
+            "output": {
+                "target": "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesResponse"
+            },
+            "errors": [
+                {
+                    "target": "com.amazonaws.computeoptimizer#AccessDeniedException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#InternalServerException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#OptInRequiredException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#ThrottlingException"
+                }
+            ],
+            "traits": {
+                "smithy.api#documentation": "Returns the recommendation preferences that are in effect for a given resource, such\n as enhanced infrastructure metrics. Considers all applicable preferences that you might\n have set at the resource, account, and organization level.\n When you create a recommendation preference, you can set its status to\n Active or Inactive. Use this action to view the\n recommendation preferences that are in effect, or Active."
+            }
+        },
+        "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesRequest": {
+            "type": "structure",
+            "members": {
+                "resourceArn": {
+                    "target": "com.amazonaws.computeoptimizer#ResourceArn",
+                    "traits": {
+                        "smithy.api#documentation": "The Amazon Resource Name (ARN) of the resource for which to confirm effective\n recommendation preferences. Only EC2 instance and Auto Scaling group ARNs are\n currently supported.",
+                        "smithy.api#required": {}
+                    }
+                }
+            }
+        },
+        "com.amazonaws.computeoptimizer#GetEffectiveRecommendationPreferencesResponse": {
+            "type": "structure",
+            "members": {
+                "enhancedInfrastructureMetrics": {
+                    "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics",
+                    "traits": {
+                        "smithy.api#documentation": "The status of the enhanced infrastructure metrics recommendation preference. Considers\n all applicable preferences that you might have set at the resource, account, and\n organization level.\n A status of Active confirms that the preference is applied in the latest\n recommendation refresh, and a status of Inactive confirms that it's not yet\n applied.\n To validate whether the preference is applied to your last generated set of\n recommendations, review the effectiveRecommendationPreferences value in the\n response of the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions."
+                    }
+                }
+            }
+        },
        "com.amazonaws.computeoptimizer#GetEnrollmentStatus": {
            "type": "operation",
            "input": {

@@ -2111,7 +2474,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Returns the enrollment (opt in) status of an account to the Compute Optimizer\n service.\n\n If the account is the management account of an organization, this action also confirms\n the enrollment status of member accounts of the organization. Use the GetEnrollmentStatusesForOrganization action to get detailed information\n about the enrollment status of member accounts of an organization."
+            "smithy.api#documentation": "Returns the enrollment (opt in) status of an account to the Compute Optimizer\n service.\n If the account is the management account of an organization, this action also confirms\n the enrollment status of member accounts of the organization. Use the GetEnrollmentStatusesForOrganization action to get detailed information\n about the enrollment status of member accounts of an organization."
        }
    },
    "com.amazonaws.computeoptimizer#GetEnrollmentStatusRequest": {
@@ -2130,7 +2493,7 @@
                "statusReason": {
                    "target": "com.amazonaws.computeoptimizer#StatusReason",
                    "traits": {
-                        "smithy.api#documentation": "The reason for the enrollment status of the account.\n\n For example, an account might show a status of Pending because member\n accounts of an organization require more time to be enrolled in the service."
+                        "smithy.api#documentation": "The reason for the enrollment status of the account.\n For example, an account might show a status of Pending because member\n accounts of an organization require more time to be enrolled in the service."
                    }
                },
                "memberAccountsEnrolled": {

@@ -2182,7 +2545,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Returns the Compute Optimizer enrollment (opt-in) status of organization member\n accounts, if your account is an organization management account.\n\n To get the enrollment status of standalone accounts, use the GetEnrollmentStatus action."
+            "smithy.api#documentation": "Returns the Compute Optimizer enrollment (opt-in) status of organization member\n accounts, if your account is an organization management account.\n To get the enrollment status of standalone accounts, use the GetEnrollmentStatus action."
        }
    },
    "com.amazonaws.computeoptimizer#GetEnrollmentStatusesForOrganizationRequest": {
@@ -2203,7 +2566,7 @@
                "maxResults": {
                    "target": "com.amazonaws.computeoptimizer#MaxResults",
                    "traits": {
-                        "smithy.api#documentation": "The maximum number of account enrollment statuses to return with a single request. You\n can specify up to 100 statuses to return with each request.\n\n To retrieve the remaining results, make another request with the returned\n nextToken value."
+                        "smithy.api#documentation": "The maximum number of account enrollment statuses to return with a single request. You\n can specify up to 100 statuses to return with each request.\n To retrieve the remaining results, make another request with the returned\n nextToken value."
                    }
                }
            }
@@ -2220,7 +2583,7 @@
                "nextToken": {
                    "target": "com.amazonaws.computeoptimizer#NextToken",
                    "traits": {
-                        "smithy.api#documentation": "The token to use to advance to the next page of account enrollment statuses.\n\n This value is null when there are no more pages of account enrollment statuses to\n return."
+                        "smithy.api#documentation": "The token to use to advance to the next page of account enrollment statuses.\n This value is null when there are no more pages of account enrollment statuses to\n return."
                    }
                }
            }
@@ -2260,7 +2623,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Returns Lambda function recommendations.\n\n Compute Optimizer generates recommendations for functions that meet a specific set\n of requirements. For more information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide."
+            "smithy.api#documentation": "Returns Lambda function recommendations.\n Compute Optimizer generates recommendations for functions that meet a specific set\n of requirements. For more information, see the Supported resources and\n requirements in the Compute Optimizer User\n Guide."
        }
    },
    "com.amazonaws.computeoptimizer#GetLambdaFunctionRecommendationsRequest": {
@@ -2269,13 +2632,13 @@
                "functionArns": {
                    "target": "com.amazonaws.computeoptimizer#FunctionArns",
                    "traits": {
-                        "smithy.api#documentation": "The Amazon Resource Name (ARN) of the functions for which to return\n recommendations.\n\n You can specify a qualified or unqualified ARN. If you specify an unqualified ARN\n without a function version suffix, Compute Optimizer will return recommendations for the\n latest ($LATEST) version of the function. If you specify a qualified ARN\n with a version suffix, Compute Optimizer will return recommendations for the specified\n function version. For more information about using function versions, see Using\n versions in the Lambda Developer\n Guide."
+                        "smithy.api#documentation": "The Amazon Resource Name (ARN) of the functions for which to return\n recommendations.\n You can specify a qualified or unqualified ARN. If you specify an unqualified ARN\n without a function version suffix, Compute Optimizer will return recommendations for the\n latest ($LATEST) version of the function. If you specify a qualified ARN\n with a version suffix, Compute Optimizer will return recommendations for the specified\n function version. For more information about using function versions, see Using\n versions in the Lambda Developer\n Guide."
                    }
                },
                "accountIds": {
                    "target": "com.amazonaws.computeoptimizer#AccountIds",
                    "traits": {
-                        "smithy.api#documentation": "The ID of the Amazon Web Services account for which to return function\n recommendations.\n\n If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return function recommendations.\n\n Only one account ID can be specified per request."
+                        "smithy.api#documentation": "The ID of the Amazon Web Services account for which to return function\n recommendations.\n If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return function recommendations.\n Only one account ID can be specified per request."
                    }
                },
                "filters": {

@@ -2293,7 +2656,7 @@
                "maxResults": {
                    "target": "com.amazonaws.computeoptimizer#MaxResults",
                    "traits": {
-                        "smithy.api#documentation": "The maximum number of function recommendations to return with a single request.\n\n To retrieve the remaining results, make another request with the returned\n nextToken value."
+                        "smithy.api#documentation": "The maximum number of function recommendations to return with a single request.\n To retrieve the remaining results, make another request with the returned\n nextToken value."
                    }
                }
            }
@@ -2304,7 +2667,7 @@
                "nextToken": {
                    "target": "com.amazonaws.computeoptimizer#NextToken",
                    "traits": {
-                        "smithy.api#documentation": "The token to use to advance to the next page of function recommendations.\n\n This value is null when there are no more pages of function recommendations to\n return."
+                        "smithy.api#documentation": "The token to use to advance to the next page of function recommendations.\n This value is null when there are no more pages of function recommendations to\n return."
                    }
                },
                "lambdaFunctionRecommendations": {
@@ -2338,7 +2701,7 @@
            }
        },
        "traits": {
-            "smithy.api#documentation": "Describes an error experienced when getting recommendations.\n\n For example, an error is returned if you request recommendations for an unsupported\n Auto Scaling group, or if you request recommendations for an instance of an\n unsupported instance family."
+            "smithy.api#documentation": "Describes an error experienced when getting recommendations.\n For example, an error is returned if you request recommendations for an unsupported\n Auto Scaling group, or if you request recommendations for an instance of an\n unsupported instance family."
        }
    },
    "com.amazonaws.computeoptimizer#GetRecommendationErrors": {
@@ -2347,6 +2710,91 @@
            "target": "com.amazonaws.computeoptimizer#GetRecommendationError"
        }
    },
+        "com.amazonaws.computeoptimizer#GetRecommendationPreferences": {
+            "type": "operation",
+            "input": {
+                "target": "com.amazonaws.computeoptimizer#GetRecommendationPreferencesRequest"
+            },
+            "output": {
+                "target": "com.amazonaws.computeoptimizer#GetRecommendationPreferencesResponse"
+            },
+            "errors": [
+                {
+                    "target": "com.amazonaws.computeoptimizer#AccessDeniedException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#InternalServerException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#OptInRequiredException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException"
+                },
+                {
+                    "target": "com.amazonaws.computeoptimizer#ThrottlingException"
+                }
+            ],
+            "traits": {
+                "smithy.api#documentation": "Returns existing recommendation preferences, such as enhanced infrastructure\n metrics.\n Use the scope parameter to specify which preferences to return. You can\n specify to return preferences for an organization, a specific account ID, or a specific\n EC2 instance or Auto Scaling group Amazon Resource Name (ARN).\n For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide."
+            }
+        },
+        "com.amazonaws.computeoptimizer#GetRecommendationPreferencesRequest": {
+            "type": "structure",
+            "members": {
+                "resourceType": {
+                    "target": "com.amazonaws.computeoptimizer#ResourceType",
+                    "traits": {
+                        "smithy.api#documentation": "The target resource type of the recommendation preference for which to return\n preferences.\n The Ec2Instance option encompasses standalone instances and instances\n that are part of Auto Scaling groups. The AutoScalingGroup option\n encompasses only instances that are part of an Auto Scaling group.",
+                        "smithy.api#required": {}
+                    }
+                },
+                "scope": {
+                    "target": "com.amazonaws.computeoptimizer#Scope",
+                    "traits": {
+                        "smithy.api#documentation": "An object that describes the scope of the recommendation preference to return.\n You can return recommendation preferences that are created at the organization level\n (for management accounts of an organization only), account level, and resource level.\n For more information, see Activating\n enhanced infrastructure metrics in the Compute Optimizer User\n Guide."
+                    }
+                },
+                "nextToken": {
+                    "target": "com.amazonaws.computeoptimizer#NextToken",
+                    "traits": {
+                        "smithy.api#documentation": "The token to advance to the next page of recommendation preferences."
+                    }
+                },
+                "maxResults": {
+                    "target": "com.amazonaws.computeoptimizer#MaxResults",
+                    "traits": {
+                        "smithy.api#documentation": "The maximum number of recommendation preferences to return with a single\n request.\n To retrieve the remaining results, make another request with the returned\n nextToken value."
+                    }
+                }
+            }
+        },
+        "com.amazonaws.computeoptimizer#GetRecommendationPreferencesResponse": {
+            "type": "structure",
+            "members": {
+                "nextToken": {
+                    "target": "com.amazonaws.computeoptimizer#NextToken",
+                    "traits": {
+                        "smithy.api#documentation": "The token to use to advance to the next page of recommendation preferences.\n This value is null when there are no more pages of recommendation preferences to\n return."
+                    }
+                },
+                "recommendationPreferencesDetails": {
+                    "target": "com.amazonaws.computeoptimizer#RecommendationPreferencesDetails",
+                    "traits": {
+                        "smithy.api#documentation": "An array of objects that describe recommendation preferences."
+                    }
+                }
+            }
+        },
    "com.amazonaws.computeoptimizer#GetRecommendationSummaries": {
        "type": "operation",
        "input": {

@@ -2379,7 +2827,7 @@
            }
        ],
        "traits": {
-            "smithy.api#documentation": "Returns the optimization findings for an account.\n\n It returns the number of:\n\n • Amazon EC2 instances in an account that are\n Underprovisioned, Overprovisioned, or\n Optimized.\n • Auto Scaling groups in an account that are NotOptimized, or\n Optimized.\n • Amazon EBS volumes in an account that are NotOptimized,\n or Optimized.\n • Lambda functions in an account that are NotOptimized,\n or Optimized."
+            "smithy.api#documentation": "Returns the optimization findings for an account.\n It returns the number of:\n • Amazon EC2 instances in an account that are\n Underprovisioned, Overprovisioned, or\n Optimized.\n • Auto Scaling groups in an account that are NotOptimized, or\n Optimized.\n • Amazon EBS volumes in an account that are NotOptimized,\n or Optimized.\n • Lambda functions in an account that are NotOptimized,\n or Optimized."
        }
    },
    "com.amazonaws.computeoptimizer#GetRecommendationSummariesRequest": {
@@ -2388,7 +2836,7 @@
                "accountIds": {
                    "target": "com.amazonaws.computeoptimizer#AccountIds",
                    "traits": {
-                        "smithy.api#documentation": "The ID of the Amazon Web Services account for which to return recommendation\n summaries.\n\n If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return recommendation summaries.\n\n Only one account ID can be specified per request."
+                        "smithy.api#documentation": "The ID of the Amazon Web Services account for which to return recommendation\n summaries.\n If your account is the management account of an organization, use this parameter to\n specify the member account for which you want to return recommendation summaries.\n Only one account ID can be specified per request."
                    }
                },
                "nextToken": {
@@ -2400,7 +2848,7 @@
                "maxResults": {
                    "target": "com.amazonaws.computeoptimizer#MaxResults",
                    "traits": {
-                        "smithy.api#documentation": "The maximum number of recommendation summaries to return with a single request.\n\n To retrieve the remaining results, make another request with the returned\n nextToken value."
+                        "smithy.api#documentation": "The maximum number of recommendation summaries to return with a single request.\n To retrieve the remaining results, make another request with the returned\n nextToken value."
                    }
                }
            }
@@ -2411,7 +2859,7 @@
                "nextToken": {
                    "target": "com.amazonaws.computeoptimizer#NextToken",
                    "traits": {
-                        "smithy.api#documentation": "The token to use to advance to the next page of recommendation summaries.\n\n This value is null when there are no more pages of recommendation summaries to\n return."
+                        "smithy.api#documentation": "The token to use to advance to the next page of recommendation summaries.\n This value is null when there are no more pages of recommendation summaries to\n return."
                    }
                },
                "recommendationSummaries": {
@@ -2422,6 +2870,9 @@
            }
        }
    },
+    "com.amazonaws.computeoptimizer#High": {
+        "type": "long"
+    },
    "com.amazonaws.computeoptimizer#Identifier": {
        "type": "string"
    },

                                                                      The finding classification of the instance.

                                                                      \n\n

                                                                      Findings for instances include:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        \n \n Underprovisioned\n —An instance is\n considered under-provisioned when at least one specification of your instance,\n such as CPU, memory, or network, does not meet the performance requirements of\n your workload. Under-provisioned instances may lead to poor application\n performance.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Overprovisioned\n —An instance is\n considered over-provisioned when at least one specification of your instance,\n such as CPU, memory, or network, can be sized down while still meeting the\n performance requirements of your workload, and no specification is\n under-provisioned. Over-provisioned instances may lead to unnecessary\n infrastructure cost.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Optimized\n —An instance is\n considered optimized when all specifications of your instance, such as CPU,\n memory, and network, meet the performance requirements of your workload and is\n not over provisioned. For optimized resources, Compute Optimizer might\n recommend a new generation instance type.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The finding classification of the instance.

                                                                      \n

                                                                      Findings for instances include:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n \n Underprovisioned\n —An instance is\n considered under-provisioned when at least one specification of your instance,\n such as CPU, memory, or network, does not meet the performance requirements of\n your workload. Under-provisioned instances may lead to poor application\n performance.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Overprovisioned\n —An instance is\n considered over-provisioned when at least one specification of your instance,\n such as CPU, memory, or network, can be sized down while still meeting the\n performance requirements of your workload, and no specification is\n under-provisioned. Over-provisioned instances may lead to unnecessary\n infrastructure cost.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Optimized\n —An instance is\n considered optimized when all specifications of your instance, such as CPU,\n memory, and network, meet the performance requirements of your workload and is\n not over provisioned. For optimized resources, Compute Optimizer might\n recommend a new generation instance type.

                                                                        \n
                                                                      • \n
                                                                      " } }, "findingReasonCodes": { "target": "com.amazonaws.computeoptimizer#InstanceRecommendationFindingReasonCodes", "traits": { - "smithy.api#documentation": "

                                                                      The reason for the finding classification of the instance.

                                                                      \n\n

                                                                      Finding reason codes for instances include:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        \n \n CPUOverprovisioned\n — The\n instance’s CPU configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n CPUUtilization metric of the current instance during the\n look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n CPUUnderprovisioned\n — The\n instance’s CPU configuration doesn't meet the performance requirements of your\n workload and there is an alternative instance type that provides better CPU\n performance. This is identified by analyzing the CPUUtilization\n metric of the current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n MemoryOverprovisioned\n — The\n instance’s memory configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n memory utilization metric of the current instance during the look-back\n period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n MemoryUnderprovisioned\n — The\n instance’s memory configuration doesn't meet the performance requirements of\n your workload and there is an alternative instance type that provides better\n memory performance. This is identified by analyzing the memory utilization\n metric of the current instance during the look-back period.

                                                                        \n\n \n

                                                                        Memory utilization is analyzed only for resources that have the unified\n CloudWatch agent installed on them. For more information, see\n Enabling memory\n utilization with the Amazon CloudWatch Agent in the\n Compute Optimizer User Guide. On Linux\n instances, Compute Optimizer analyses the mem_used_percent\n metric in the CWAgent namespace, or the legacy\n MemoryUtilization metric in the System/Linux\n namespace. On Windows instances, Compute Optimizer analyses the Memory\n % Committed Bytes In Use metric in the CWAgent\n namespace.

                                                                        \n
                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n EBSThroughputOverprovisioned\n —\n The instance’s EBS throughput configuration can be sized down while still\n meeting the performance requirements of your workload. This is identified by\n analyzing the VolumeReadOps and VolumeWriteOps metrics\n of EBS volumes attached to the current instance during the look-back\n period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n EBSThroughputUnderprovisioned\n —\n The instance’s EBS throughput configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type that\n provides better EBS throughput performance. This is identified by analyzing the\n VolumeReadOps and VolumeWriteOps metrics of EBS\n volumes attached to the current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n EBSIOPSOverprovisioned\n — The\n instance’s EBS IOPS configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n VolumeReadBytes and VolumeWriteBytes metric of EBS\n volumes attached to the current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n EBSIOPSUnderprovisioned\n — The\n instance’s EBS IOPS configuration doesn't meet the performance requirements of\n your workload and there is an alternative instance type that provides better EBS\n IOPS performance. This is identified by analyzing the\n VolumeReadBytes and VolumeWriteBytes metric of EBS\n volumes attached to the current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n NetworkBandwidthOverprovisioned\n \n — The instance’s network bandwidth configuration can be sized down while still\n meeting the performance requirements of your workload. This is identified by\n analyzing the NetworkIn and NetworkOut metrics of the\n current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n NetworkBandwidthUnderprovisioned\n \n — The instance’s network bandwidth configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type that\n provides better network bandwidth performance. This is identified by analyzing\n the NetworkIn and NetworkOut metrics of the current\n instance during the look-back period. This finding reason happens when the\n NetworkIn or NetworkOut performance of an instance\n is impacted.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n NetworkPPSOverprovisioned\n — The\n instance’s network PPS (packets per second) configuration can be sized down\n while still meeting the performance requirements of your workload. This is\n identified by analyzing the NetworkPacketsIn and\n NetworkPacketsIn metrics of the current instance during the\n look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n NetworkPPSUnderprovisioned\n — The\n instance’s network PPS (packets per second) configuration doesn't meet the\n performance requirements of your workload and there is an alternative instance\n type that provides better network PPS performance. This is identified by\n analyzing the NetworkPacketsIn and NetworkPacketsIn\n metrics of the current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n DiskIOPSOverprovisioned\n — The\n instance’s disk IOPS configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n DiskReadOps and DiskWriteOps metrics of the\n current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n DiskIOPSUnderprovisioned\n — The\n instance’s disk IOPS configuration doesn't meet the performance requirements of\n your workload and there is an alternative instance type that provides better\n disk IOPS performance. This is identified by analyzing the\n DiskReadOps and DiskWriteOps metrics of the\n current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n DiskThroughputOverprovisioned\n —\n The instance’s disk throughput configuration can be sized down while still\n meeting the performance requirements of your workload. This is identified by\n analyzing the DiskReadBytes and DiskWriteBytes metrics\n of the current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n DiskThroughputUnderprovisioned\n —\n The instance’s disk throughput configuration doesn't meet the performance\n requirements of your workload and there is an alternative instance type that\n provides better disk throughput performance. This is identified by analyzing the\n DiskReadBytes and DiskWriteBytes metrics of the\n current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      \n\n \n

                                                                      For more information about instance metrics, see List the\n available CloudWatch metrics for your instances in the\n Amazon Elastic Compute Cloud User Guide. For more information\n about EBS volume metrics, see Amazon CloudWatch\n metrics for Amazon EBS in the Amazon Elastic Compute Cloud\n User Guide.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      The reason for the finding classification of the instance.

                                                                      \n

                                                                      Finding reason codes for instances include:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n \n CPUOverprovisioned\n — The\n instance’s CPU configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n CPUUtilization metric of the current instance during the\n look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n CPUUnderprovisioned\n — The\n instance’s CPU configuration doesn't meet the performance requirements of your\n workload and there is an alternative instance type that provides better CPU\n performance. This is identified by analyzing the CPUUtilization\n metric of the current instance during the look-back period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n MemoryOverprovisioned\n — The\n instance’s memory configuration can be sized down while still meeting the\n performance requirements of your workload. This is identified by analyzing the\n memory utilization metric of the current instance during the look-back\n period.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n MemoryUnderprovisioned\n — The\n instance’s memory configuration doesn't meet the performance requirements of\n your workload and there is an alternative instance type that provides better\n memory performance. This is identified by analyzing the memory utilization\n metric of the current instance during the look-back period.

                                                                        \n \n

                                                                        Memory utilization is analyzed only for resources that have the unified\n CloudWatch agent installed on them. For more information, see\n Enabling memory\n utilization with the Amazon CloudWatch Agent in the\n Compute Optimizer User Guide. On Linux\n instances, Compute Optimizer analyses the mem_used_percent\n metric in the CWAgent namespace, or the legacy\n MemoryUtilization metric in the System/Linux\n namespace. On Windows instances, Compute Optimizer analyses the Memory\n % Committed Bytes In Use metric in the CWAgent\n namespace.

                                                                        \n
                                                                        \n
                                                                      • \n
                                                                      • \n

•  EBSThroughputOverprovisioned — The instance’s EBS throughput configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the VolumeReadBytes and VolumeWriteBytes metrics of EBS volumes attached to the current instance during the look-back period.

•  EBSThroughputUnderprovisioned — The instance’s EBS throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS throughput performance. This is identified by analyzing the VolumeReadBytes and VolumeWriteBytes metrics of EBS volumes attached to the current instance during the look-back period.

•  EBSIOPSOverprovisioned — The instance’s EBS IOPS configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the VolumeReadOps and VolumeWriteOps metrics of EBS volumes attached to the current instance during the look-back period.

•  EBSIOPSUnderprovisioned — The instance’s EBS IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better EBS IOPS performance. This is identified by analyzing the VolumeReadOps and VolumeWriteOps metrics of EBS volumes attached to the current instance during the look-back period.

•  NetworkBandwidthOverprovisioned — The instance’s network bandwidth configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the NetworkIn and NetworkOut metrics of the current instance during the look-back period.

•  NetworkBandwidthUnderprovisioned — The instance’s network bandwidth configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network bandwidth performance. This is identified by analyzing the NetworkIn and NetworkOut metrics of the current instance during the look-back period. This finding reason happens when the NetworkIn or NetworkOut performance of an instance is impacted.

•  NetworkPPSOverprovisioned — The instance’s network PPS (packets per second) configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the NetworkPacketsIn and NetworkPacketsOut metrics of the current instance during the look-back period.

•  NetworkPPSUnderprovisioned — The instance’s network PPS (packets per second) configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better network PPS performance. This is identified by analyzing the NetworkPacketsIn and NetworkPacketsOut metrics of the current instance during the look-back period.

•  DiskIOPSOverprovisioned — The instance’s disk IOPS configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the DiskReadOps and DiskWriteOps metrics of the current instance during the look-back period.

•  DiskIOPSUnderprovisioned — The instance’s disk IOPS configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk IOPS performance. This is identified by analyzing the DiskReadOps and DiskWriteOps metrics of the current instance during the look-back period.

•  DiskThroughputOverprovisioned — The instance’s disk throughput configuration can be sized down while still meeting the performance requirements of your workload. This is identified by analyzing the DiskReadBytes and DiskWriteBytes metrics of the current instance during the look-back period.

•  DiskThroughputUnderprovisioned — The instance’s disk throughput configuration doesn't meet the performance requirements of your workload and there is an alternative instance type that provides better disk throughput performance. This is identified by analyzing the DiskReadBytes and DiskWriteBytes metrics of the current instance during the look-back period.

For more information about instance metrics, see List the available CloudWatch metrics for your instances in the Amazon Elastic Compute Cloud User Guide. For more information about EBS volume metrics, see Amazon CloudWatch metrics for Amazon EBS in the Amazon Elastic Compute Cloud User Guide."
      } },
      "utilizationMetrics": {
@@ -2506,7 +2957,19 @@

      "lastRefreshTimestamp": { "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", "traits": {
-          "smithy.api#documentation": "The timestamp of when the instance recommendation was last refreshed."
+          "smithy.api#documentation": "The timestamp of when the instance recommendation was last generated."
      } },
+     "currentPerformanceRisk": { "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRisk", "traits": {
+          "smithy.api#documentation": "The risk of the current instance not meeting the performance needs of its workloads. The higher the risk, the more likely the current instance configuration is underperforming in its workload."
+     } },
+     "effectiveRecommendationPreferences": { "target": "com.amazonaws.computeoptimizer#EffectiveRecommendationPreferences", "traits": {
+          "smithy.api#documentation": "An object that describes the effective recommendation preferences for the instance."
+     } }
      },
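The members reconstructed above (the finding reason codes list, plus the new currentPerformanceRisk and effectiveRecommendationPreferences fields) all appear on instance recommendations returned by the GetEC2InstanceRecommendations action referenced later in this model. The following sketch is not part of this patch; it only illustrates how a consumer of the regenerated TypeScript client might read them, and the region, filter values, and field names are assumptions drawn from this model rather than verified output.

```ts
import {
  ComputeOptimizerClient,
  GetEC2InstanceRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

// Example region; adjust for your account.
const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function showInstanceFindings(): Promise<void> {
  const { instanceRecommendations = [] } = await client.send(
    new GetEC2InstanceRecommendationsCommand({
      // Filter names and values as documented for the Filter shape in this model.
      filters: [{ name: "Finding", values: ["Underprovisioned", "Overprovisioned"] }],
    })
  );

  for (const rec of instanceRecommendations) {
    // findingReasonCodes carries values such as CPUUnderprovisioned or
    // MemoryOverprovisioned from the list above.
    console.log(rec.instanceArn, rec.finding, rec.findingReasonCodes);
    // Members added in this model revision:
    console.log("  currentPerformanceRisk:", rec.currentPerformanceRisk);
    console.log("  effectiveRecommendationPreferences:", rec.effectiveRecommendationPreferences);
  }
}

showInstanceFindings().catch(console.error);
```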

                                                                      " } } }, @@ -2603,25 +3066,31 @@ "projectedUtilizationMetrics": { "target": "com.amazonaws.computeoptimizer#ProjectedUtilizationMetrics", "traits": { - "smithy.api#documentation": "

                                                                      An array of objects that describe the projected utilization metrics of the instance\n recommendation option.

                                                                      \n\n \n

                                                                      The Cpu and Memory metrics are the only projected\n utilization metrics returned. Additionally, the Memory metric is\n returned only for resources that have the unified CloudWatch agent installed\n on them. For more information, see Enabling Memory\n Utilization with the CloudWatch Agent.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      An array of objects that describe the projected utilization metrics of the instance\n recommendation option.

                                                                      \n \n

                                                                      The Cpu and Memory metrics are the only projected\n utilization metrics returned. Additionally, the Memory metric is\n returned only for resources that have the unified CloudWatch agent installed\n on them. For more information, see Enabling Memory\n Utilization with the CloudWatch Agent.

                                                                      \n
                                                                      " } }, "platformDifferences": { "target": "com.amazonaws.computeoptimizer#PlatformDifferences", "traits": { - "smithy.api#documentation": "

                                                                      Describes the configuration differences between the current instance and the\n recommended instance type. You should consider the configuration differences before\n migrating your workloads from the current instance to the recommended instance type. The\n Change the instance type guide for Linux and Change the instance type\n guide for Windows provide general guidance for getting started with an\n instance migration.

                                                                      \n\n

                                                                      Platform differences include:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        \n \n Hypervisor\n — The hypervisor of\n the recommended instance type is different than that of the current instance.\n For example, the recommended instance type uses a Nitro hypervisor and the\n current instance uses a Xen hypervisor. The differences that you should consider\n between these hypervisors are covered in the Nitro Hypervisor section of the\n Amazon EC2 frequently asked questions. For more information, see\n Instances built on the Nitro System in the Amazon EC2\n User Guide for Linux, or Instances built on the Nitro System in the Amazon EC2\n User Guide for Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n NetworkInterface\n — The network\n interface of the recommended instance type is different than that of the current\n instance. For example, the recommended instance type supports enhanced\n networking and the current instance might not. To enable enhanced networking for\n the recommended instance type, you must install the Elastic Network Adapter\n (ENA) driver or the Intel 82599 Virtual Function driver. For more information,\n see Networking and storage features and Enhanced networking\n on Linux in the Amazon EC2 User Guide for\n Linux, or Networking and storage features and Enhanced\n networking on Windows in the Amazon EC2 User Guide for\n Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n StorageInterface\n — The storage\n interface of the recommended instance type is different than that of the current\n instance. For example, the recommended instance type uses an NVMe storage\n interface and the current instance does not. To access NVMe volumes for the\n recommended instance type, you will need to install or upgrade the NVMe driver.\n For more information, see Networking and storage features and Amazon EBS and NVMe on\n Linux instances in the Amazon EC2 User Guide for\n Linux, or Networking and storage features and Amazon EBS and NVMe\n on Windows instances in the Amazon EC2 User Guide for\n Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n InstanceStoreAvailability\n — The\n recommended instance type does not support instance store volumes and the\n current instance does. Before migrating, you might need to back up the data on\n your instance store volumes if you want to preserve them. For more information,\n see How do I back up an instance store volume on my Amazon EC2 instance\n to Amazon EBS? in the Amazon Web Services Premium\n Support Knowledge Base. For more information, see Networking and storage features and Amazon EC2\n instance store in the Amazon EC2 User Guide for\n Linux, or see Networking and storage features and Amazon EC2\n instance store in the Amazon EC2 User Guide for\n Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n VirtualizationType\n — The\n recommended instance type uses the hardware virtual machine (HVM) virtualization\n type and the current instance uses the paravirtual (PV) virtualization type. For\n more information about the differences between these virtualization types, see\n Linux AMI\n virtualization types in the Amazon EC2 User Guide for\n Linux, or Windows AMI virtualization types in the Amazon EC2 User\n Guide for Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Architecture\n — The CPU\n architecture between the recommended instance type and the current instance is\n different. For example, the recommended instance type might use an Arm CPU\n architecture and the current instance type might use a different one, such as\n x86. Before migrating, you should consider recompiling the software on your\n instance for the new architecture. Alternatively, you might switch to an Amazon\n Machine Image (AMI) that supports the new architecture. For more information\n about the CPU architecture for each instance type, see Amazon EC2 Instance Types.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      Describes the configuration differences between the current instance and the\n recommended instance type. You should consider the configuration differences before\n migrating your workloads from the current instance to the recommended instance type. The\n Change the instance type guide for Linux and Change the instance type\n guide for Windows provide general guidance for getting started with an\n instance migration.

                                                                      \n

                                                                      Platform differences include:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n \n Hypervisor\n — The hypervisor of\n the recommended instance type is different than that of the current instance.\n For example, the recommended instance type uses a Nitro hypervisor and the\n current instance uses a Xen hypervisor. The differences that you should consider\n between these hypervisors are covered in the Nitro Hypervisor section of the\n Amazon EC2 frequently asked questions. For more information, see\n Instances built on the Nitro System in the Amazon EC2\n User Guide for Linux, or Instances built on the Nitro System in the Amazon EC2\n User Guide for Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n NetworkInterface\n — The network\n interface of the recommended instance type is different than that of the current\n instance. For example, the recommended instance type supports enhanced\n networking and the current instance might not. To enable enhanced networking for\n the recommended instance type, you must install the Elastic Network Adapter\n (ENA) driver or the Intel 82599 Virtual Function driver. For more information,\n see Networking and storage features and Enhanced networking\n on Linux in the Amazon EC2 User Guide for\n Linux, or Networking and storage features and Enhanced\n networking on Windows in the Amazon EC2 User Guide for\n Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n StorageInterface\n — The storage\n interface of the recommended instance type is different than that of the current\n instance. For example, the recommended instance type uses an NVMe storage\n interface and the current instance does not. To access NVMe volumes for the\n recommended instance type, you will need to install or upgrade the NVMe driver.\n For more information, see Networking and storage features and Amazon EBS and NVMe on\n Linux instances in the Amazon EC2 User Guide for\n Linux, or Networking and storage features and Amazon EBS and NVMe\n on Windows instances in the Amazon EC2 User Guide for\n Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n InstanceStoreAvailability\n — The\n recommended instance type does not support instance store volumes and the\n current instance does. Before migrating, you might need to back up the data on\n your instance store volumes if you want to preserve them. For more information,\n see How do I back up an instance store volume on my Amazon EC2 instance\n to Amazon EBS? in the Amazon Web Services Premium\n Support Knowledge Base. For more information, see Networking and storage features and Amazon EC2\n instance store in the Amazon EC2 User Guide for\n Linux, or see Networking and storage features and Amazon EC2\n instance store in the Amazon EC2 User Guide for\n Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n VirtualizationType\n — The\n recommended instance type uses the hardware virtual machine (HVM) virtualization\n type and the current instance uses the paravirtual (PV) virtualization type. For\n more information about the differences between these virtualization types, see\n Linux AMI\n virtualization types in the Amazon EC2 User Guide for\n Linux, or Windows AMI virtualization types in the Amazon EC2 User\n Guide for Windows.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Architecture\n — The CPU\n architecture between the recommended instance type and the current instance is\n different. For example, the recommended instance type might use an Arm CPU\n architecture and the current instance type might use a different one, such as\n x86. Before migrating, you should consider recompiling the software on your\n instance for the new architecture. Alternatively, you might switch to an Amazon\n Machine Image (AMI) that supports the new architecture. For more information\n about the CPU architecture for each instance type, see Amazon EC2 Instance Types.

                                                                        \n
                                                                      • \n
                                                                      " } }, "performanceRisk": { "target": "com.amazonaws.computeoptimizer#PerformanceRisk", "traits": { - "smithy.api#documentation": "

                                                                      The performance risk of the instance recommendation option.

                                                                      \n\n

                                                                      Performance risk indicates the likelihood of the recommended instance type not meeting\n the resource needs of your workload. Compute Optimizer calculates an individual\n performance risk score for each specification of the recommended instance, including\n CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput,\n and network PPS.\n The performance\n risk of the recommended instance is calculated as the maximum performance risk score\n across the analyzed resource specifications.

                                                                      \n\n

                                                                      The value ranges from 0 - 4, with 0 meaning\n that the recommended resource is predicted to always provide enough hardware capability.\n The higher the performance risk is, the more likely you should validate whether the\n recommendation will meet the performance requirements of your workload before migrating\n your resource.

                                                                      " + "smithy.api#documentation": "

                                                                      The performance risk of the instance recommendation option.

                                                                      \n

                                                                      Performance risk indicates the likelihood of the recommended instance type not meeting\n the resource needs of your workload. Compute Optimizer calculates an individual\n performance risk score for each specification of the recommended instance, including\n CPU, memory, EBS throughput, EBS IOPS, disk throughput, disk IOPS, network throughput,\n and network PPS.\n The performance\n risk of the recommended instance is calculated as the maximum performance risk score\n across the analyzed resource specifications.

                                                                      \n

                                                                      The value ranges from 0 - 4, with 0 meaning\n that the recommended resource is predicted to always provide enough hardware capability.\n The higher the performance risk is, the more likely you should validate whether the\n recommendation will meet the performance requirements of your workload before migrating\n your resource.

                                                                      " } }, "rank": { "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#documentation": "

                                                                      The rank of the instance recommendation option.

                                                                      \n\n

                                                                      The top recommendation option is ranked as 1.

                                                                      " + "smithy.api#documentation": "

                                                                      The rank of the instance recommendation option.

                                                                      \n

                                                                      The top recommendation option is ranked as 1.

                                                                      " + } + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", + "traits": { + "smithy.api#documentation": "

                                                                      An object that describes the savings opportunity for the instance recommendation\n option. Savings opportunity includes the estimated monthly savings amount and\n percentage.
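Because the top recommendation option is ranked as 1 and performanceRisk ranges from 0 to 4, a caller would typically sort recommendationOptions by rank and then weigh performanceRisk together with the new savingsOpportunity object. A minimal sketch follows; it is illustrative only, and the ARN, region, and option field names are assumed from this model rather than taken from tested output.

```ts
import {
  ComputeOptimizerClient,
  GetEC2InstanceRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" }); // example region

async function printRankedOptions(instanceArn: string): Promise<void> {
  const { instanceRecommendations = [] } = await client.send(
    new GetEC2InstanceRecommendationsCommand({ instanceArns: [instanceArn] })
  );

  for (const rec of instanceRecommendations) {
    // The top option is ranked as 1; performanceRisk ranges from 0 (best) to 4.
    const options = [...(rec.recommendationOptions ?? [])].sort(
      (a, b) => (a.rank ?? Number.MAX_SAFE_INTEGER) - (b.rank ?? Number.MAX_SAFE_INTEGER)
    );
    for (const option of options) {
      console.log(
        `rank=${option.rank} instanceType=${option.instanceType} performanceRisk=${option.performanceRisk}`,
        option.savingsOpportunity
      );
    }
  }
}

// Placeholder ARN for illustration only.
printRankedOptions("arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0").catch(console.error);
```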

                                                                      " } } }, @@ -2670,18 +3139,18 @@ "name": { "target": "com.amazonaws.computeoptimizer#JobFilterName", "traits": { - "smithy.api#documentation": "

                                                                      The name of the filter.

                                                                      \n\n

                                                                      Specify ResourceType to return export jobs of a specific resource type\n (for example, Ec2Instance).

                                                                      \n\n

                                                                      Specify JobStatus to return export jobs with a specific status (e.g,\n Complete).

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the filter.

                                                                      \n

                                                                      Specify ResourceType to return export jobs of a specific resource type\n (for example, Ec2Instance).

                                                                      \n

                                                                      Specify JobStatus to return export jobs with a specific status (e.g,\n Complete).

                                                                      " } }, "values": { "target": "com.amazonaws.computeoptimizer#FilterValues", "traits": { - "smithy.api#documentation": "

                                                                      The value of the filter.

                                                                      \n\n

                                                                      The valid values for this parameter are as follows, depending on what you specify for\n the name parameter:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        Specify Ec2Instance or AutoScalingGroup if you\n specify the name parameter as ResourceType. There is\n no filter for EBS volumes because volume recommendations cannot be exported at\n this time.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify Queued, InProgress, Complete,\n or Failed if you specify the name parameter as\n JobStatus.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The value of the filter.

                                                                      \n

                                                                      The valid values for this parameter are as follows, depending on what you specify for\n the name parameter:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Specify Ec2Instance or AutoScalingGroup if you\n specify the name parameter as ResourceType. There is\n no filter for EBS volumes because volume recommendations cannot be exported at\n this time.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify Queued, InProgress, Complete,\n or Failed if you specify the name parameter as\n JobStatus.

                                                                        \n
                                                                      • \n
                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      Describes a filter that returns a more specific list of recommendation export jobs.\n Use this filter with the DescribeRecommendationExportJobs\n action.

                                                                      \n\n

                                                                      You can use EBSFilter with the GetEBSVolumeRecommendations action,\n LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and Filter with\n the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

                                                                      " + "smithy.api#documentation": "

                                                                      Describes a filter that returns a more specific list of recommendation export jobs.\n Use this filter with the DescribeRecommendationExportJobs\n action.

                                                                      \n

                                                                      You can use EBSFilter with the GetEBSVolumeRecommendations action,\n LambdaFunctionRecommendationFilter with the GetLambdaFunctionRecommendations action, and Filter with\n the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.
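As the JobFilter documentation above describes, DescribeRecommendationExportJobs accepts ResourceType and JobStatus filters. A hedged sketch of that call with the TypeScript client follows; the response field names (recommendationExportJobs, jobId, status) are assumed from this model, and the region is an example value.

```ts
import {
  ComputeOptimizerClient,
  DescribeRecommendationExportJobsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" }); // example region

async function listCompletedInstanceExportJobs(): Promise<void> {
  const { recommendationExportJobs = [] } = await client.send(
    new DescribeRecommendationExportJobsCommand({
      filters: [
        // JobFilter names and values as documented above.
        { name: "ResourceType", values: ["Ec2Instance"] },
        { name: "JobStatus", values: ["Complete"] },
      ],
    })
  );

  for (const job of recommendationExportJobs) {
    console.log(job.jobId, job.resourceType, job.status);
  }
}

listCompletedInstanceExportJobs().catch(console.error);
```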

                                                                      " } }, "com.amazonaws.computeoptimizer#JobFilterName": { @@ -2805,7 +3274,7 @@ "rank": { "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#documentation": "

                                                                      The rank of the function recommendation option.

                                                                      \n\n

                                                                      The top recommendation option is ranked as 1.

                                                                      " + "smithy.api#documentation": "

                                                                      The rank of the function recommendation option.

                                                                      \n

                                                                      The top recommendation option is ranked as 1.

                                                                      " } }, "memorySize": { @@ -2819,6 +3288,12 @@ "traits": { "smithy.api#documentation": "

                                                                      An array of objects that describe the projected utilization metrics of the function\n recommendation option.

                                                                      " } + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", + "traits": { + "smithy.api#documentation": "

                                                                      An object that describes the savings opportunity for the Lambda function\n recommendation option. Savings opportunity includes the estimated monthly savings amount\n and percentage.

                                                                      " + } } }, "traits": { @@ -2909,13 +3384,13 @@ "lastRefreshTimestamp": { "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", "traits": { - "smithy.api#documentation": "

                                                                      The timestamp of when the function recommendation was last refreshed.

                                                                      " + "smithy.api#documentation": "

                                                                      The timestamp of when the function recommendation was last generated.

                                                                      " } }, "finding": { "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFinding", "traits": { - "smithy.api#documentation": "

                                                                      The finding classification of the function.

                                                                      \n\n

                                                                      Findings for functions include:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n \n Optimized\n — The function is\n correctly provisioned to run your workload based on its current configuration\n and its utilization history. This finding classification does not include\n finding reason codes.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n NotOptimized\n — The function is\n performing at a higher level (over-provisioned) or at a lower level\n (under-provisioned) than required for your workload because its current\n configuration is not optimal. Over-provisioned resources might lead to\n unnecessary infrastructure cost, and under-provisioned resources might lead to\n poor application performance. This finding classification can include the\n MemoryUnderprovisioned and MemoryUnderprovisioned\n finding reason codes.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Unavailable\n — Compute Optimizer\n was unable to generate a recommendation for the function. This could be because\n the function has not accumulated sufficient metric data, or the function does\n not qualify for a recommendation. This finding classification can include the\n InsufficientData and Inconclusive finding reason\n codes.

                                                                        \n \n

                                                                        Functions with a finding of unavailable are not returned unless you\n specify the filter parameter with a value of\n Unavailable in your\n GetLambdaFunctionRecommendations request.

                                                                        \n
                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The finding classification of the function.

                                                                      \n

                                                                      Findings for functions include:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n \n Optimized\n — The function is\n correctly provisioned to run your workload based on its current configuration\n and its utilization history. This finding classification does not include\n finding reason codes.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n NotOptimized\n — The function is\n performing at a higher level (over-provisioned) or at a lower level\n (under-provisioned) than required for your workload because its current\n configuration is not optimal. Over-provisioned resources might lead to\n unnecessary infrastructure cost, and under-provisioned resources might lead to\n poor application performance. This finding classification can include the\n MemoryUnderprovisioned and MemoryUnderprovisioned\n finding reason codes.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n \n Unavailable\n — Compute Optimizer\n was unable to generate a recommendation for the function. This could be because\n the function has not accumulated sufficient metric data, or the function does\n not qualify for a recommendation. This finding classification can include the\n InsufficientData and Inconclusive finding reason\n codes.

                                                                        \n \n

                                                                        Functions with a finding of unavailable are not returned unless you\n specify the filter parameter with a value of\n Unavailable in your\n GetLambdaFunctionRecommendations request.

                                                                        \n
                                                                        \n
                                                                      • \n
                                                                      " } }, "findingReasonCodes": { @@ -2929,6 +3404,12 @@ "traits": { "smithy.api#documentation": "

                                                                      An array of objects that describe the memory configuration recommendation options for\n the function.

                                                                      " } + }, + "currentPerformanceRisk": { + "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRisk", + "traits": { + "smithy.api#documentation": "

                                                                      The risk of the current Lambda function not meeting the performance needs\n of its workloads. The higher the risk, the more likely the current Lambda\n function configuration is underperforming in its workload.
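The Lambda recommendation members above (finding, findingReasonCodes, memorySizeRecommendationOptions, and the new currentPerformanceRisk) can be summarized per function. The helper below is an illustrative sketch rather than generated code; the LambdaFunctionRecommendation type and its field names are assumed to be exported by the regenerated client as modeled here.

```ts
import type { LambdaFunctionRecommendation } from "@aws-sdk/client-compute-optimizer";

// Illustrative helper (not part of the generated client): summarizes the members
// documented above. Field names are assumed to match this model as generated.
export function summarizeLambdaRecommendation(rec: LambdaFunctionRecommendation): string {
  // The top memory size option is ranked as 1.
  const topOption = (rec.memorySizeRecommendationOptions ?? []).find((option) => option.rank === 1);
  return [
    `function: ${rec.functionArn}`,
    `finding: ${rec.finding} (${(rec.findingReasonCodes ?? []).join(", ") || "no reason codes"})`,
    `currentPerformanceRisk: ${rec.currentPerformanceRisk}`,
    `top-ranked memory option: ${topOption?.memorySize ?? "n/a"}`,
  ].join("\n");
}
```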

                                                                      " + } } }, "traits": { @@ -2941,18 +3422,18 @@ "name": { "target": "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilterName", "traits": { - "smithy.api#documentation": "

                                                                      The name of the filter.

                                                                      \n\n

                                                                      Specify Finding to return recommendations with a specific finding\n classification (for example, NotOptimized).

                                                                      \n\n

                                                                      Specify FindingReasonCode to return recommendations with a specific\n finding reason code (for example, MemoryUnderprovisioned).

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the filter.

                                                                      \n

                                                                      Specify Finding to return recommendations with a specific finding\n classification (for example, NotOptimized).

                                                                      \n

                                                                      Specify FindingReasonCode to return recommendations with a specific\n finding reason code (for example, MemoryUnderprovisioned).

                                                                      " } }, "values": { "target": "com.amazonaws.computeoptimizer#FilterValues", "traits": { - "smithy.api#documentation": "

                                                                      The value of the filter.

                                                                      \n\n

                                                                      The valid values for this parameter are as follows, depending on what you specify for\n the name parameter:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        Specify Optimized, NotOptimized, or\n Unavailable if you specify the name parameter as\n Finding.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify MemoryOverprovisioned,\n MemoryUnderprovisioned, InsufficientData, or\n Inconclusive if you specify the name parameter as\n FindingReasonCode.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The value of the filter.

                                                                      \n

                                                                      The valid values for this parameter are as follows, depending on what you specify for\n the name parameter:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Specify Optimized, NotOptimized, or\n Unavailable if you specify the name parameter as\n Finding.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Specify MemoryOverprovisioned,\n MemoryUnderprovisioned, InsufficientData, or\n Inconclusive if you specify the name parameter as\n FindingReasonCode.

                                                                        \n
                                                                      • \n
                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      Describes a filter that returns a more specific list of Lambda\n function recommendations. Use this filter with the GetLambdaFunctionRecommendations action.

                                                                      \n\n

                                                                      You can use EBSFilter with the GetEBSVolumeRecommendations action, JobFilter with the\n DescribeRecommendationExportJobs action, and Filter\n with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.

                                                                      " + "smithy.api#documentation": "

                                                                      Describes a filter that returns a more specific list of Lambda\n function recommendations. Use this filter with the GetLambdaFunctionRecommendations action.

                                                                      \n

                                                                      You can use EBSFilter with the GetEBSVolumeRecommendations action, JobFilter with the\n DescribeRecommendationExportJobs action, and Filter\n with the GetAutoScalingGroupRecommendations and GetEC2InstanceRecommendations actions.
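The filter documented above drives GetLambdaFunctionRecommendations. Note that functions with an Unavailable finding are only returned when you filter for them explicitly. The sketch below is illustrative rather than authoritative; the filter names and values are taken from the documentation above, and the region is an example value.

```ts
import {
  ComputeOptimizerClient,
  GetLambdaFunctionRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" }); // example region

async function listNotOptimizedFunctions(): Promise<void> {
  const { lambdaFunctionRecommendations = [] } = await client.send(
    new GetLambdaFunctionRecommendationsCommand({
      filters: [
        // Filter names and values as documented above. Unavailable findings are
        // only returned if you request them explicitly, which this call does not.
        { name: "Finding", values: ["NotOptimized"] },
        { name: "FindingReasonCode", values: ["MemoryUnderprovisioned"] },
      ],
    })
  );

  for (const rec of lambdaFunctionRecommendations) {
    console.log(rec.functionArn, rec.finding, rec.findingReasonCodes);
  }
}

listNotOptimizedFunctions().catch(console.error);
```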

                                                                      " } }, "com.amazonaws.computeoptimizer#LambdaFunctionRecommendationFilterName": { @@ -3036,13 +3517,13 @@ "name": { "target": "com.amazonaws.computeoptimizer#LambdaFunctionMetricName", "traits": { - "smithy.api#documentation": "

                                                                      The name of the utilization metric.

                                                                      \n\n

                                                                      The following utilization metrics are available:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n Duration - The amount of time that your function code spends\n processing an event.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Memory - The amount of memory used per invocation.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The name of the utilization metric.

                                                                      \n

                                                                      The following utilization metrics are available:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n Duration - The amount of time that your function code spends\n processing an event.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Memory - The amount of memory used per invocation.

                                                                        \n
                                                                      • \n
                                                                      " } }, "statistic": { "target": "com.amazonaws.computeoptimizer#LambdaFunctionMetricStatistic", "traits": { - "smithy.api#documentation": "

                                                                      The statistic of the utilization metric.

                                                                      \n\n

                                                                      The Compute Optimizer API, Command Line Interface (CLI), and SDKs\n return utilization metrics using only the Maximum statistic, which is the\n highest value observed during the specified period.

                                                                      \n\n

                                                                      The Compute Optimizer console displays graphs for some utilization metrics using the\n Average statistic, which is the value of Sum /\n SampleCount during the specified period. For more information, see\n Viewing resource\n recommendations in the Compute Optimizer User\n Guide. You can also get averaged utilization metric data for your resources\n using Amazon CloudWatch. For more information, see the Amazon CloudWatch\n User Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      The statistic of the utilization metric.

                                                                      \n

                                                                      The Compute Optimizer API, Command Line Interface (CLI), and SDKs\n return utilization metrics using only the Maximum statistic, which is the\n highest value observed during the specified period.

                                                                      \n

                                                                      The Compute Optimizer console displays graphs for some utilization metrics using the\n Average statistic, which is the value of Sum /\n SampleCount during the specified period. For more information, see\n Viewing resource\n recommendations in the Compute Optimizer User\n Guide. You can also get averaged utilization metric data for your resources\n using Amazon CloudWatch. For more information, see the Amazon CloudWatch\n User Guide.
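The statistic documentation above notes that the API returns only the Maximum statistic, while the console derives Average as Sum / SampleCount from CloudWatch. A small illustrative helper for indexing the returned metrics by name follows; the LambdaFunctionUtilizationMetric type name and its fields are assumed from this model, not confirmed against the published package.

```ts
import type { LambdaFunctionUtilizationMetric } from "@aws-sdk/client-compute-optimizer";

// Illustrative helper: the API returns utilization metrics with the Maximum
// statistic only, so this simply indexes the metrics (Duration, Memory) by name.
export function maximumUtilizationByName(
  metrics: LambdaFunctionUtilizationMetric[] = []
): Record<string, number | undefined> {
  const byName: Record<string, number | undefined> = {};
  for (const metric of metrics) {
    if (metric.statistic === "Maximum" && metric.name) {
      byName[metric.name] = metric.value; // e.g. byName.Duration, byName.Memory
    }
  }
  return byName;
}
```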

                                                                      " } }, "value": { @@ -3084,6 +3565,9 @@ "com.amazonaws.computeoptimizer#LookBackPeriodInDays": { "type": "double" }, + "com.amazonaws.computeoptimizer#Low": { + "type": "long" + }, "com.amazonaws.computeoptimizer#MaxResults": { "type": "integer", "traits": { @@ -3093,6 +3577,9 @@ "com.amazonaws.computeoptimizer#MaxSize": { "type": "integer" }, + "com.amazonaws.computeoptimizer#Medium": { + "type": "long" + }, "com.amazonaws.computeoptimizer#MemberAccountsEnrolled": { "type": "boolean" }, @@ -3288,7 +3775,7 @@ "name": { "target": "com.amazonaws.computeoptimizer#MetricName", "traits": { - "smithy.api#documentation": "

The name of the projected utilization metric.

The following projected utilization metrics are returned:

• Cpu - The projected percentage of allocated EC2 compute units that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the processing power required to run an application on the recommendation option.
  Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.
  Units: Percent
• Memory - The percentage of memory that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the amount of memory required to run an application on the recommendation option.
  Units: Percent
  The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
                                                                      " + "smithy.api#documentation": "

The name of the projected utilization metric.

The following projected utilization metrics are returned:

• Cpu - The projected percentage of allocated EC2 compute units that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the processing power required to run an application on the recommendation option.
  Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.
  Units: Percent
• Memory - The percentage of memory that would be in use on the recommendation option had you used that resource during the analyzed period. This metric identifies the amount of memory required to run an application on the recommendation option.
  Units: Percent
  The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
                                                                      " } }, "timestamps": { @@ -3305,7 +3792,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a projected utilization metric of a recommendation option, such as an Amazon EC2 instance. This represents the projected utilization of a recommendation option had you used that resource during the analyzed period.

Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
                                                                      " + "smithy.api#documentation": "

Describes a projected utilization metric of a recommendation option, such as an Amazon EC2 instance. This represents the projected utilization of a recommendation option had you used that resource during the analyzed period.

Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
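For reference, a minimal TypeScript sketch of requesting these projected metrics with the client in this package. The instance ARN is a placeholder, and the request and response field names (instanceArn, stat, period, startTime, endTime, recommendedOptionProjectedMetrics) are assumed to follow the GetEC2RecommendationProjectedMetrics shapes rather than being confirmed by this hunk:

```ts
import {
  ComputeOptimizerClient,
  GetEC2RecommendationProjectedMetricsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function main() {
  // Placeholder instance ARN; the stat/period/time window shape the CloudWatch query.
  const response = await client.send(
    new GetEC2RecommendationProjectedMetricsCommand({
      instanceArn: "arn:aws:ec2:us-east-1:111122223333:instance/i-0123456789abcdef0",
      stat: "Maximum",
      period: 300,
      startTime: new Date(Date.now() - 14 * 24 * 60 * 60 * 1000),
      endTime: new Date(),
    })
  );
  for (const option of response.recommendedOptionProjectedMetrics ?? []) {
    // Only Cpu and Memory projected metrics are returned, per the documentation above.
    console.log(option.recommendedInstanceType, option.rank, option.projectedMetrics);
  }
}

main().catch(console.error);
```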
                                                                      " } }, "com.amazonaws.computeoptimizer#ProjectedMetrics": { @@ -3320,6 +3807,72 @@ "target": "com.amazonaws.computeoptimizer#UtilizationMetric" } }, + "com.amazonaws.computeoptimizer#PutRecommendationPreferences": { + "type": "operation", + "input": { + "target": "com.amazonaws.computeoptimizer#PutRecommendationPreferencesRequest" + }, + "output": { + "target": "com.amazonaws.computeoptimizer#PutRecommendationPreferencesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.computeoptimizer#AccessDeniedException" + }, + { + "target": "com.amazonaws.computeoptimizer#InternalServerException" + }, + { + "target": "com.amazonaws.computeoptimizer#InvalidParameterValueException" + }, + { + "target": "com.amazonaws.computeoptimizer#MissingAuthenticationToken" + }, + { + "target": "com.amazonaws.computeoptimizer#OptInRequiredException" + }, + { + "target": "com.amazonaws.computeoptimizer#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.computeoptimizer#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.computeoptimizer#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a new recommendation preference or updates an existing recommendation preference, such as enhanced infrastructure metrics.

For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide.
                                                                      " + } + }, + "com.amazonaws.computeoptimizer#PutRecommendationPreferencesRequest": { + "type": "structure", + "members": { + "resourceType": { + "target": "com.amazonaws.computeoptimizer#ResourceType", + "traits": { + "smithy.api#documentation": "

The target resource type of the recommendation preference to create.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.
                                                                      ", + "smithy.api#required": {} + } + }, + "scope": { + "target": "com.amazonaws.computeoptimizer#Scope", + "traits": { + "smithy.api#documentation": "

An object that describes the scope of the recommendation preference to create.

You can create recommendation preferences at the organization level (for management accounts of an organization only), account level, and resource level. For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide.

You cannot create recommendation preferences for Auto Scaling groups at the organization and account levels. You can create recommendation preferences for Auto Scaling groups only at the resource level, by specifying a scope name of ResourceArn and a scope value of the Auto Scaling group Amazon Resource Name (ARN). This configures the preference for all instances that are part of the specified Auto Scaling group.
                                                                      " + } + }, + "enhancedInfrastructureMetrics": { + "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics", + "traits": { + "smithy.api#documentation": "

The status of the enhanced infrastructure metrics recommendation preference to create or update.

A status of Active confirms that the preference is applied in the latest recommendation refresh, and a status of Inactive confirms that it's not yet applied.
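A minimal sketch of calling the new PutRecommendationPreferences operation with the request members defined above, using the PutRecommendationPreferencesCommand added in this patch; the region and 12-digit account ID are placeholders:

```ts
import {
  ComputeOptimizerClient,
  PutRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function main() {
  // Activate enhanced infrastructure metrics for all EC2 instances in one account.
  await client.send(
    new PutRecommendationPreferencesCommand({
      resourceType: "Ec2Instance",
      scope: { name: "AccountId", value: "111122223333" }, // placeholder account ID
      enhancedInfrastructureMetrics: "Active",
    })
  );
}

main().catch(console.error);
```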

                                                                      " + } + } + } + }, + "com.amazonaws.computeoptimizer#PutRecommendationPreferencesResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.computeoptimizer#Rank": { "type": "integer" }, @@ -3396,7 +3949,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a recommendation export job.

Use the DescribeRecommendationExportJobs action to view your recommendation export jobs.

Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your recommendations.
                                                                      " + "smithy.api#documentation": "

Describes a recommendation export job.

Use the DescribeRecommendationExportJobs action to view your recommendation export jobs.

Use the ExportAutoScalingGroupRecommendations or ExportEC2InstanceRecommendations actions to request an export of your recommendations.
                                                                      " } }, "com.amazonaws.computeoptimizer#RecommendationExportJobs": { @@ -3411,18 +3964,67 @@ "target": "com.amazonaws.computeoptimizer#InstanceRecommendationOption" } }, + "com.amazonaws.computeoptimizer#RecommendationPreferenceName": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EnhancedInfrastructureMetrics", + "name": "ENHANCED_INFRASTRUCTURE_METRICS" + } + ] + } + }, + "com.amazonaws.computeoptimizer#RecommendationPreferenceNames": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferenceName" + } + }, "com.amazonaws.computeoptimizer#RecommendationPreferences": { "type": "structure", "members": { "cpuVendorArchitectures": { "target": "com.amazonaws.computeoptimizer#CpuVendorArchitectures", "traits": { - "smithy.api#documentation": "

Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations.

For example, when you specify AWS_ARM64 with:
                                                                      \n\n " + "smithy.api#documentation": "

Specifies the CPU vendor and architecture for Amazon EC2 instance and Auto Scaling group recommendations.

For example, when you specify AWS_ARM64 with:
                                                                      \n " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Describes the recommendation preferences to return in the response of a GetAutoScalingGroupRecommendations, GetEC2InstanceRecommendations, and GetEC2RecommendationProjectedMetrics request.
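A hedged sketch of passing these preferences on a GetEC2InstanceRecommendations call; the recommendationPreferences request member and instanceRecommendations response member names are assumed from the shapes above rather than shown in this hunk:

```ts
import {
  ComputeOptimizerClient,
  GetEC2InstanceRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function main() {
  // Ask for Graviton (AWS_ARM64) based recommendation options only.
  const response = await client.send(
    new GetEC2InstanceRecommendationsCommand({
      recommendationPreferences: { cpuVendorArchitectures: ["AWS_ARM64"] },
    })
  );
  console.log(response.instanceRecommendations?.length ?? 0, "recommendations returned");
}

main().catch(console.error);
```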

                                                                      " + } + }, + "com.amazonaws.computeoptimizer#RecommendationPreferencesDetail": { + "type": "structure", + "members": { + "scope": { + "target": "com.amazonaws.computeoptimizer#Scope", + "traits": { + "smithy.api#documentation": "

An object that describes the scope of the recommendation preference.

Recommendation preferences can be created at the organization level (for management accounts of an organization only), account level, and resource level. For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide.
                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.computeoptimizer#ResourceType", + "traits": { + "smithy.api#documentation": "

The target resource type of the recommendation preference to create.

The Ec2Instance option encompasses standalone instances and instances that are part of Auto Scaling groups. The AutoScalingGroup option encompasses only instances that are part of an Auto Scaling group.
                                                                      " + } + }, + "enhancedInfrastructureMetrics": { + "target": "com.amazonaws.computeoptimizer#EnhancedInfrastructureMetrics", + "traits": { + "smithy.api#documentation": "

The status of the enhanced infrastructure metrics recommendation preference.

A status of Active confirms that the preference is applied in the latest recommendation refresh, and a status of Inactive confirms that it's not yet applied.
                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      Describes preferences for recommendations.

                                                                      " + "smithy.api#documentation": "

                                                                      Describes a recommendation preference.

                                                                      " + } + }, + "com.amazonaws.computeoptimizer#RecommendationPreferencesDetails": { + "type": "list", + "member": { + "target": "com.amazonaws.computeoptimizer#RecommendationPreferencesDetail" } }, "com.amazonaws.computeoptimizer#RecommendationSource": { @@ -3495,7 +4097,7 @@ "recommendationResourceType": { "target": "com.amazonaws.computeoptimizer#RecommendationSourceType", "traits": { - "smithy.api#documentation": "

                                                                      The resource type of the recommendation.

                                                                      " + "smithy.api#documentation": "

                                                                      The resource type that the recommendation summary applies to.

                                                                      " } }, "accountId": { @@ -3503,6 +4105,18 @@ "traits": { "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the recommendation summary.

                                                                      " } + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", + "traits": { + "smithy.api#documentation": "

An object that describes the savings opportunity for a given resource type. Savings opportunity includes the estimated monthly savings amount and percentage.

                                                                      " + } + }, + "currentPerformanceRiskRatings": { + "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRiskRatings", + "traits": { + "smithy.api#documentation": "

An object that describes the performance risk ratings for a given resource type.

                                                                      " + } } }, "traits": { @@ -3524,7 +4138,7 @@ "rank": { "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#documentation": "

The rank of the recommendation option projected metric.

The top recommendation option is ranked as 1.

The projected metric rank correlates to the recommendation option rank. For example, the projected metric ranked as 1 is related to the recommendation option that is also ranked as 1 in the same response.
                                                                      " + "smithy.api#documentation": "

The rank of the recommendation option projected metric.

The top recommendation option is ranked as 1.

The projected metric rank correlates to the recommendation option rank. For example, the projected metric ranked as 1 is related to the recommendation option that is also ranked as 1 in the same response.
                                                                      " } }, "projectedMetrics": { @@ -3535,7 +4149,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a projected utilization metric of a recommendation option.

The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
                                                                      " + "smithy.api#documentation": "

Describes a projected utilization metric of a recommendation option.

The Cpu and Memory metrics are the only projected utilization metrics returned when you run the GetEC2RecommendationProjectedMetrics action. Additionally, the Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
                                                                      " } }, "com.amazonaws.computeoptimizer#RecommendedOptionProjectedMetrics": { @@ -3544,6 +4158,9 @@ "target": "com.amazonaws.computeoptimizer#RecommendedOptionProjectedMetric" } }, + "com.amazonaws.computeoptimizer#ResourceArn": { + "type": "string" + }, "com.amazonaws.computeoptimizer#ResourceNotFoundException": { "type": "structure", "members": { @@ -3592,13 +4209,13 @@ "key": { "target": "com.amazonaws.computeoptimizer#DestinationKey", "traits": { - "smithy.api#documentation": "

The Amazon S3 bucket key of an export file.

The key uniquely identifies the object, or export file, in the S3 bucket.
                                                                      " + "smithy.api#documentation": "

The Amazon S3 bucket key of an export file.

The key uniquely identifies the object, or export file, in the S3 bucket.
                                                                      " } }, "metadataKey": { "target": "com.amazonaws.computeoptimizer#MetadataKey", "traits": { - "smithy.api#documentation": "

The Amazon S3 bucket key of a metadata file.

The key uniquely identifies the object, or metadata file, in the S3 bucket.
                                                                      " + "smithy.api#documentation": "

The Amazon S3 bucket key of a metadata file.

The key uniquely identifies the object, or metadata file, in the S3 bucket.
                                                                      " } } }, @@ -3623,9 +4240,74 @@ } }, "traits": { - "smithy.api#documentation": "

Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for a recommendations export job.

You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer User Guide.
                                                                      " + "smithy.api#documentation": "

Describes the destination Amazon Simple Storage Service (Amazon S3) bucket name and key prefix for a recommendations export job.

You must create the destination Amazon S3 bucket for your recommendations export before you create the export job. Compute Optimizer does not create the S3 bucket for you. After you create the S3 bucket, ensure that it has the required permission policy to allow Compute Optimizer to write the export file to it. If you plan to specify an object prefix when you create the export job, you must include the object prefix in the policy that you add to the S3 bucket. For more information, see Amazon S3 Bucket Policy for Compute Optimizer in the Compute Optimizer User Guide.
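A minimal sketch of starting an export to such a bucket, assuming the s3DestinationConfig request member with bucket and keyPrefix fields; the bucket name and prefix are placeholders and the bucket policy described above must already be in place:

```ts
import {
  ComputeOptimizerClient,
  ExportEC2InstanceRecommendationsCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function main() {
  // The destination bucket must exist and allow Compute Optimizer to write to it.
  const response = await client.send(
    new ExportEC2InstanceRecommendationsCommand({
      s3DestinationConfig: {
        bucket: "amzn-s3-demo-bucket",            // placeholder bucket name
        keyPrefix: "compute-optimizer/exports",   // placeholder object prefix
      },
    })
  );
  console.log(response.jobId, response.s3Destination);
}

main().catch(console.error);
```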

                                                                      " + } + }, + "com.amazonaws.computeoptimizer#SavingsOpportunity": { + "type": "structure", + "members": { + "savingsOpportunityPercentage": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunityPercentage", + "traits": { + "smithy.api#documentation": "

                                                                      The estimated monthly savings possible as a percentage of monthly cost.

                                                                      " + } + }, + "estimatedMonthlySavings": { + "target": "com.amazonaws.computeoptimizer#EstimatedMonthlySavings", + "traits": { + "smithy.api#documentation": "

An object that describes the estimated monthly savings amount possible based on On-Demand instance pricing.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the savings opportunity for recommendations of a given resource type or for the recommendation option of an individual resource.

Savings opportunity represents the estimated monthly savings you can achieve by implementing a given Compute Optimizer recommendation.

Savings opportunity data requires that you opt in to Cost Explorer, as well as activate Receive Amazon EC2 resource recommendations in the Cost Explorer preferences page. That creates a connection between Cost Explorer and Compute Optimizer. With this connection, Cost Explorer generates savings estimates considering the price of existing resources, the price of recommended resources, and historical usage data. Estimated monthly savings reflects the projected dollar savings associated with each of the recommendations generated. For more information, see Enabling Cost Explorer and Optimizing your cost with Rightsizing Recommendations in the Cost Management User Guide.
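A hedged sketch of reading the new savingsOpportunity and currentPerformanceRiskRatings fields from recommendation summaries; the recommendationSummaries response member name is assumed from the GetRecommendationSummaries shapes rather than shown in this hunk:

```ts
import {
  ComputeOptimizerClient,
  GetRecommendationSummariesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function main() {
  const { recommendationSummaries } = await client.send(
    new GetRecommendationSummariesCommand({})
  );
  for (const summary of recommendationSummaries ?? []) {
    // savingsOpportunity and currentPerformanceRiskRatings are the fields added above.
    console.log(
      summary.recommendationResourceType,
      summary.savingsOpportunity?.savingsOpportunityPercentage,
      summary.savingsOpportunity?.estimatedMonthlySavings,
      summary.currentPerformanceRiskRatings
    );
  }
}

main().catch(console.error);
```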
                                                                      " + } + }, + "com.amazonaws.computeoptimizer#SavingsOpportunityPercentage": { + "type": "double" + }, + "com.amazonaws.computeoptimizer#Scope": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.computeoptimizer#ScopeName", + "traits": { + "smithy.api#documentation": "

The name of the scope.

The following scopes are possible:

• Organization - Specifies that the recommendation preference applies at the organization level, for all member accounts of an organization.
• AccountId - Specifies that the recommendation preference applies at the account level, for all resources of a given resource type in an account.
• ResourceArn - Specifies that the recommendation preference applies at the individual resource level.
                                                                      " + } + }, + "value": { + "target": "com.amazonaws.computeoptimizer#ScopeValue", + "traits": { + "smithy.api#documentation": "

The value of the scope.

If you specified the name of the scope as:

• Organization - The value must be ALL_ACCOUNTS.
• AccountId - The value must be a 12-digit Amazon Web Services account ID.
• ResourceArn - The value must be the Amazon Resource Name (ARN) of an EC2 instance or an Auto Scaling group.

Only EC2 instance and Auto Scaling group ARNs are currently supported.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Describes the scope of a recommendation preference.

Recommendation preferences can be created at the organization level (for management accounts of an organization only), account level, and resource level. For more information, see Activating enhanced infrastructure metrics in the Compute Optimizer User Guide.

You cannot create recommendation preferences for Auto Scaling groups at the organization and account levels. You can create recommendation preferences for Auto Scaling groups only at the resource level, by specifying a scope name of ResourceArn and a scope value of the Auto Scaling group Amazon Resource Name (ARN). This configures the preference for all instances that are part of the specified Auto Scaling group.
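A minimal sketch of building such a resource-level Scope and listing the preferences configured for it with the GetRecommendationPreferencesCommand added in this patch; the Auto Scaling group ARN is a placeholder and the recommendationPreferencesDetails response member name is assumed from the shapes above:

```ts
import {
  ComputeOptimizerClient,
  GetRecommendationPreferencesCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function main() {
  // Resource-level scope: one Auto Scaling group, identified by its ARN (placeholder).
  const response = await client.send(
    new GetRecommendationPreferencesCommand({
      resourceType: "AutoScalingGroup",
      scope: {
        name: "ResourceArn",
        value: "arn:aws:autoscaling:us-east-1:111122223333:autoScalingGroup:example",
      },
    })
  );
  console.log(response.recommendationPreferencesDetails);
}

main().catch(console.error);
```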
                                                                      " + } + }, + "com.amazonaws.computeoptimizer#ScopeName": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Organization", + "name": "ORGANIZATION" + }, + { + "value": "AccountId", + "name": "ACCOUNT_ID" + }, + { + "value": "ResourceArn", + "name": "RESOURCE_ARN" + } + ] } }, + "com.amazonaws.computeoptimizer#ScopeValue": { + "type": "string" + }, "com.amazonaws.computeoptimizer#ServiceUnavailableException": { "type": "structure", "members": { @@ -3754,7 +4436,7 @@ } ], "traits": { - "smithy.api#documentation": "

Updates the enrollment (opt in and opt out) status of an account to the Compute Optimizer service.

If the account is a management account of an organization, this action can also be used to enroll member accounts of the organization.

You must have the appropriate permissions to opt in to Compute Optimizer, to view its recommendations, and to opt out. For more information, see Controlling access with Amazon Web Services Identity and Access Management in the Compute Optimizer User Guide.

When you opt in, Compute Optimizer automatically creates a service-linked role in your account to access its data. For more information, see Using Service-Linked Roles for Compute Optimizer in the Compute Optimizer User Guide.
                                                                      " + "smithy.api#documentation": "

Updates the enrollment (opt in and opt out) status of an account to the Compute Optimizer service.

If the account is a management account of an organization, this action can also be used to enroll member accounts of the organization.

You must have the appropriate permissions to opt in to Compute Optimizer, to view its recommendations, and to opt out. For more information, see Controlling access with Amazon Web Services Identity and Access Management in the Compute Optimizer User Guide.

When you opt in, Compute Optimizer automatically creates a service-linked role in your account to access its data. For more information, see Using Service-Linked Roles for Compute Optimizer in the Compute Optimizer User Guide.
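A minimal sketch of opting an account in through this operation; the includeMemberAccounts flag and the status/statusReason response members are assumed from the UpdateEnrollmentStatus shapes rather than shown in full in this hunk:

```ts
import {
  ComputeOptimizerClient,
  UpdateEnrollmentStatusCommand,
} from "@aws-sdk/client-compute-optimizer";

const client = new ComputeOptimizerClient({ region: "us-east-1" });

async function main() {
  // Opt the calling account in; includeMemberAccounts also enrolls member accounts
  // when called from an organization's management account.
  const response = await client.send(
    new UpdateEnrollmentStatusCommand({
      status: "Active",
      includeMemberAccounts: true,
    })
  );
  console.log(response.status, response.statusReason);
}

main().catch(console.error);
```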

                                                                      " } }, "com.amazonaws.computeoptimizer#UpdateEnrollmentStatusRequest": { @@ -3763,7 +4445,7 @@ "status": { "target": "com.amazonaws.computeoptimizer#Status", "traits": { - "smithy.api#documentation": "

The new enrollment status of the account.

The following status options are available:

• Active - Opts in your account to the Compute Optimizer service. Compute Optimizer begins analyzing the configuration and utilization metrics of your Amazon Web Services resources after you opt in. For more information, see Metrics analyzed by Compute Optimizer in the Compute Optimizer User Guide.
• Inactive - Opts out your account from the Compute Optimizer service. Your account's recommendations and related metrics data will be deleted from Compute Optimizer after you opt out.

The Pending and Failed options cannot be used to update the enrollment status of an account. They are returned in the response of a request to update the enrollment status of an account.
                                                                      ", + "smithy.api#documentation": "

The new enrollment status of the account.

The following status options are available:

• Active - Opts in your account to the Compute Optimizer service. Compute Optimizer begins analyzing the configuration and utilization metrics of your Amazon Web Services resources after you opt in. For more information, see Metrics analyzed by Compute Optimizer in the Compute Optimizer User Guide.
• Inactive - Opts out your account from the Compute Optimizer service. Your account's recommendations and related metrics data will be deleted from Compute Optimizer after you opt out.

The Pending and Failed options cannot be used to update the enrollment status of an account. They are returned in the response of a request to update the enrollment status of an account.
                                                                      ", "smithy.api#required": {} } }, @@ -3798,13 +4480,13 @@ "name": { "target": "com.amazonaws.computeoptimizer#MetricName", "traits": { - "smithy.api#documentation": "

The name of the utilization metric.

The following utilization metrics are available:

• Cpu - The percentage of allocated EC2 compute units that are currently in use on the instance. This metric identifies the processing power required to run an application on the instance.
  Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.
  Units: Percent
• Memory - The percentage of memory that is currently in use on the instance. This metric identifies the amount of memory required to run an application on the instance.
  Units: Percent
  The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
• EBS_READ_OPS_PER_SECOND - The completed read operations from all EBS volumes attached to the instance in a specified period of time.
  Unit: Count
• EBS_WRITE_OPS_PER_SECOND - The completed write operations to all EBS volumes attached to the instance in a specified period of time.
  Unit: Count
• EBS_READ_BYTES_PER_SECOND - The bytes read from all EBS volumes attached to the instance in a specified period of time.
  Unit: Bytes
• EBS_WRITE_BYTES_PER_SECOND - The bytes written to all EBS volumes attached to the instance in a specified period of time.
  Unit: Bytes
• DISK_READ_OPS_PER_SECOND - The completed read operations from all instance store volumes available to the instance in a specified period of time.
  If there are no instance store volumes, either the value is 0 or the metric is not reported.
• DISK_WRITE_OPS_PER_SECOND - The completed write operations from all instance store volumes available to the instance in a specified period of time.
  If there are no instance store volumes, either the value is 0 or the metric is not reported.
• DISK_READ_BYTES_PER_SECOND - The bytes read from all instance store volumes available to the instance. This metric is used to determine the volume of the data the application reads from the disk of the instance. This can be used to determine the speed of the application.
  If there are no instance store volumes, either the value is 0 or the metric is not reported.
• DISK_WRITE_BYTES_PER_SECOND - The bytes written to all instance store volumes available to the instance. This metric is used to determine the volume of the data the application writes onto the disk of the instance. This can be used to determine the speed of the application.
  If there are no instance store volumes, either the value is 0 or the metric is not reported.
• NETWORK_IN_BYTES_PER_SECOND - The number of bytes received by the instance on all network interfaces. This metric identifies the volume of incoming network traffic to a single instance.
• NETWORK_OUT_BYTES_PER_SECOND - The number of bytes sent out by the instance on all network interfaces. This metric identifies the volume of outgoing network traffic from a single instance.
• NETWORK_PACKETS_IN_PER_SECOND - The number of packets received by the instance on all network interfaces. This metric identifies the volume of incoming traffic in terms of the number of packets on a single instance.
• NETWORK_PACKETS_OUT_PER_SECOND - The number of packets sent out by the instance on all network interfaces. This metric identifies the volume of outgoing traffic in terms of the number of packets on a single instance.
                                                                      " + "smithy.api#documentation": "

The name of the utilization metric.

The following utilization metrics are available:

• Cpu - The percentage of allocated EC2 compute units that are currently in use on the instance. This metric identifies the processing power required to run an application on the instance.
  Depending on the instance type, tools in your operating system can show a lower percentage than CloudWatch when the instance is not allocated a full processor core.
  Units: Percent
• Memory - The percentage of memory that is currently in use on the instance. This metric identifies the amount of memory required to run an application on the instance.
  Units: Percent
  The Memory metric is returned only for resources that have the unified CloudWatch agent installed on them. For more information, see Enabling Memory Utilization with the CloudWatch Agent.
• EBS_READ_OPS_PER_SECOND - The completed read operations from all EBS volumes attached to the instance in a specified period of time.
  Unit: Count
• EBS_WRITE_OPS_PER_SECOND - The completed write operations to all EBS volumes attached to the instance in a specified period of time.
  Unit: Count
• EBS_READ_BYTES_PER_SECOND - The bytes read from all EBS volumes attached to the instance in a specified period of time.
  Unit: Bytes
• EBS_WRITE_BYTES_PER_SECOND - The bytes written to all EBS volumes attached to the instance in a specified period of time.
  Unit: Bytes
• DISK_READ_OPS_PER_SECOND - The completed read operations from all instance store volumes available to the instance in a specified period of time.
  If there are no instance store volumes, either the value is 0 or the metric is not reported.
• DISK_WRITE_OPS_PER_SECOND - The completed write operations from all instance store volumes available to the instance in a specified period of time.
  If there are no instance store volumes, either the value is 0 or the metric is not reported.
• DISK_READ_BYTES_PER_SECOND - The bytes read from all instance store volumes available to the instance. This metric is used to determine the volume of the data the application reads from the disk of the instance. This can be used to determine the speed of the application.
  If there are no instance store volumes, either the value is 0 or the metric is not reported.
• DISK_WRITE_BYTES_PER_SECOND - The bytes written to all instance store volumes available to the instance. This metric is used to determine the volume of the data the application writes onto the disk of the instance. This can be used to determine the speed of the application.
  If there are no instance store volumes, either the value is 0 or the metric is not reported.
• NETWORK_IN_BYTES_PER_SECOND - The number of bytes received by the instance on all network interfaces. This metric identifies the volume of incoming network traffic to a single instance.
                                                                      • \n

                                                                        \n NETWORK_OUT_BYTES_PER_SECOND - The number of bytes sent out by\n the instance on all network interfaces. This metric identifies the volume of\n outgoing network traffic from a single instance.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n NETWORK_PACKETS_IN_PER_SECOND - The number of packets received by\n the instance on all network interfaces. This metric identifies the volume of\n incoming traffic in terms of the number of packets on a single instance.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n NETWORK_PACKETS_OUT_PER_SECOND - The number of packets sent out\n by the instance on all network interfaces. This metric identifies the volume of\n outgoing traffic in terms of the number of packets on a single instance.

                                                                        \n
                                                                      • \n
                                                                      " } }, "statistic": { "target": "com.amazonaws.computeoptimizer#MetricStatistic", "traits": { - "smithy.api#documentation": "

The statistic of the utilization metric.

The Compute Optimizer API, Command Line Interface (CLI), and SDKs return utilization metrics using only the Maximum statistic, which is the highest value observed during the specified period.

The Compute Optimizer console displays graphs for some utilization metrics using the Average statistic, which is the value of Sum / SampleCount during the specified period. For more information, see Viewing resource recommendations in the Compute Optimizer User Guide. You can also get averaged utilization metric data for your resources using Amazon CloudWatch. For more information, see the Amazon CloudWatch User Guide.

" + "smithy.api#documentation": "

The statistic of the utilization metric.

The Compute Optimizer API, Command Line Interface (CLI), and SDKs return utilization metrics using only the Maximum statistic, which is the highest value observed during the specified period.

The Compute Optimizer console displays graphs for some utilization metrics using the Average statistic, which is the value of Sum / SampleCount during the specified period. For more information, see Viewing resource recommendations in the Compute Optimizer User Guide. You can also get averaged utilization metric data for your resources using Amazon CloudWatch. For more information, see the Amazon CloudWatch User Guide.

" } }, "value": { @@ -3815,7 +4497,7 @@ } }, "traits": { - "smithy.api#documentation": "

Describes a utilization metric of a resource, such as an Amazon EC2 instance.

Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

" + "smithy.api#documentation": "

Describes a utilization metric of a resource, such as an Amazon EC2 instance.

Compare the utilization metric data of your resource against its projected utilization metric data to determine the performance difference between your current resource and the recommended option.

" } }, "com.amazonaws.computeoptimizer#UtilizationMetrics": { @@ -3824,6 +4506,12 @@ "target": "com.amazonaws.computeoptimizer#UtilizationMetric" } }, + "com.amazonaws.computeoptimizer#Value": { + "type": "double" + }, + "com.amazonaws.computeoptimizer#VeryLow": { + "type": "long" + },

"com.amazonaws.computeoptimizer#VolumeArn": { "type": "string" }, @@ -3851,7 +4539,7 @@ "volumeType": { "target": "com.amazonaws.computeoptimizer#VolumeType", "traits": { - "smithy.api#documentation": "

The volume type.

This can be gp2 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

" + "smithy.api#documentation": "

The volume type.

This can be gp2 for General Purpose SSD, io1 or io2 for Provisioned IOPS SSD, st1 for Throughput Optimized HDD, sc1 for Cold HDD, or standard for Magnetic volumes.

" } }, "volumeSize": { @@ -3913,7 +4601,7 @@ "finding": { "target": "com.amazonaws.computeoptimizer#EBSFinding", "traits": { - "smithy.api#documentation": "

The finding classification of the volume.

Findings for volumes include:

• NotOptimized — A volume is considered not optimized when Compute Optimizer identifies a recommendation that can provide better performance for your workload.

• Optimized — A volume is considered optimized when Compute Optimizer determines that the volume is correctly provisioned to run your workload based on the chosen volume type. For optimized resources, Compute Optimizer might recommend a new generation volume type.

" + "smithy.api#documentation": "

The finding classification of the volume.

Findings for volumes include:

• NotOptimized — A volume is considered not optimized when Compute Optimizer identifies a recommendation that can provide better performance for your workload.

• Optimized — A volume is considered optimized when Compute Optimizer determines that the volume is correctly provisioned to run your workload based on the chosen volume type. For optimized resources, Compute Optimizer might recommend a new generation volume type.
                                                                      " } }, "utilizationMetrics": { @@ -3937,7 +4625,13 @@ "lastRefreshTimestamp": { "target": "com.amazonaws.computeoptimizer#LastRefreshTimestamp", "traits": { - "smithy.api#documentation": "

The timestamp of when the volume recommendation was last refreshed.

" + "smithy.api#documentation": "

The timestamp of when the volume recommendation was last generated.

" + } + }, + "currentPerformanceRisk": { + "target": "com.amazonaws.computeoptimizer#CurrentPerformanceRisk", + "traits": { + "smithy.api#documentation": "

The risk of the current EBS volume not meeting the performance needs of its workloads. The higher the risk, the more likely the current EBS volume doesn't have sufficient capacity.

" + } + } }, @@ -3957,13 +4651,19 @@ "performanceRisk": { "target": "com.amazonaws.computeoptimizer#PerformanceRisk", "traits": { - "smithy.api#documentation": "

The performance risk of the volume recommendation option.

Performance risk is the likelihood of the recommended volume type meeting the performance requirement of your workload.

The value ranges from 0 - 4, with 0 meaning that the recommended resource is predicted to always provide enough hardware capability. The higher the performance risk is, the more likely you should validate whether the recommendation will meet the performance requirements of your workload before migrating your resource.

" + "smithy.api#documentation": "

The performance risk of the volume recommendation option.

Performance risk is the likelihood of the recommended volume type meeting the performance requirement of your workload.

The value ranges from 0 - 4, with 0 meaning that the recommended resource is predicted to always provide enough hardware capability. The higher the performance risk is, the more likely you should validate whether the recommendation will meet the performance requirements of your workload before migrating your resource.

" } }, "rank": { "target": "com.amazonaws.computeoptimizer#Rank", "traits": { - "smithy.api#documentation": "

The rank of the volume recommendation option.

The top recommendation option is ranked as 1.

" + "smithy.api#documentation": "

The rank of the volume recommendation option.

The top recommendation option is ranked as 1.

" + } + }, + "savingsOpportunity": { + "target": "com.amazonaws.computeoptimizer#SavingsOpportunity", + "traits": { + "smithy.api#documentation": "

An object that describes the savings opportunity for the EBS volume recommendation option. Savings opportunity includes the estimated monthly savings amount and percentage.

" } } },

diff --git a/codegen/sdk-codegen/aws-models/dataexchange.json b/codegen/sdk-codegen/aws-models/dataexchange.json
index bf9819f249e5..2b01fbe0da90 100644
--- a/codegen/sdk-codegen/aws-models/dataexchange.json
+++ b/codegen/sdk-codegen/aws-models/dataexchange.json
@@ -60,6 +60,74 @@
"smithy.api#documentation": "

What occurs after a certain event.

" } }, + "com.amazonaws.dataexchange#ApiDescription": { + "type": "string", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the API.

                                                                      " + } + }, + "com.amazonaws.dataexchange#ApiGatewayApiAsset": { + "type": "structure", + "members": { + "ApiDescription": { + "target": "com.amazonaws.dataexchange#ApiDescription", + "traits": { + "smithy.api#documentation": "

                                                                      The API description of the API asset.

                                                                      " + } + }, + "ApiEndpoint": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API endpoint of the API asset.

                                                                      " + } + }, + "ApiId": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The unique identifier of the API asset.

                                                                      " + } + }, + "ApiKey": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API key of the API asset.

                                                                      " + } + }, + "ApiName": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API name of the API asset.

                                                                      " + } + }, + "ApiSpecificationDownloadUrl": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The download URL of the API specification of the API asset.

                                                                      " + } + }, + "ApiSpecificationDownloadUrlExpiresAt": { + "target": "com.amazonaws.dataexchange#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the upload URL expires, in ISO 8601 format.

                                                                      " + } + }, + "ProtocolType": { + "target": "com.amazonaws.dataexchange#ProtocolType", + "traits": { + "smithy.api#documentation": "

                                                                      The protocol type of the API asset.

                                                                      " + } + }, + "Stage": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The stage of the API asset.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The API Gateway API that is the asset.

                                                                      " + } + }, "com.amazonaws.dataexchange#Arn": { "type": "string", "traits": { @@ -108,6 +176,12 @@ "traits": { "smithy.api#documentation": "

                                                                      The Amazon Redshift datashare that is the asset.

                                                                      " } + }, + "ApiGatewayApiAsset": { + "target": "com.amazonaws.dataexchange#ApiGatewayApiAsset", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the API Gateway API asset.

                                                                      " + } } }, "traits": { @@ -162,7 +236,7 @@ "Name": { "target": "com.amazonaws.dataexchange#AssetName", "traits": { - "smithy.api#documentation": "

                                                                      The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

                                                                      ", + "smithy.api#documentation": "

                                                                      The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.

                                                                      ", "smithy.api#required": {} } }, @@ -188,13 +262,13 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      An asset in AWS Data Exchange is a piece of data. The asset can be a structured data file, an image file, or some other data file that can be stored as an S3 object, or an Amazon Redshift datashare (Preview). When you create an import job for your files, you create an asset in AWS Data Exchange for each of those files.

                                                                      " + "smithy.api#documentation": "

                                                                      An asset in AWS Data Exchange is a piece of data (S3 object) or a means of fulfilling data (Amazon Redshift datashare or Amazon API Gateway API). The asset can be a structured data file, an image file, or some other data file that can be stored as an S3 object, an Amazon API Gateway API, or an Amazon Redshift datashare (Preview). When you create an import job for your files, API Gateway APIs, or Amazon Redshift datashares, you create an asset in AWS Data Exchange.

                                                                      " } }, "com.amazonaws.dataexchange#AssetName": { "type": "string", "traits": { - "smithy.api#documentation": "

                                                                      The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.

                                                                      " } }, "com.amazonaws.dataexchange#AssetSourceEntry": { @@ -231,6 +305,10 @@ { "value": "REDSHIFT_DATA_SHARE", "name": "REDSHIFT_DATA_SHARE" + }, + { + "value": "API_GATEWAY_API", + "name": "API_GATEWAY_API" } ] } @@ -854,6 +932,21 @@ }, "com.amazonaws.dataexchange#DataExchange": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "DataExchange", + "arnNamespace": "dataexchange", + "cloudFormationName": "DataExchange", + "cloudTrailEventSource": "dataexchange.amazonaws.com", + "endpointPrefix": "dataexchange" + }, + "aws.auth#sigv4": { + "name": "dataexchange" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

                                                                      AWS Data Exchange is a service that makes it easy for AWS customers to exchange data in the cloud. You can use the AWS Data Exchange APIs to create, update, manage, and access file-based data set in the AWS Cloud.

                                                                      As a subscriber, you can view and access the data sets that you have an entitlement to through a subscription. You can use the APIS to download or copy your entitled data sets to Amazon S3 for use across a variety of AWS analytics and machine learning services.

                                                                      As a provider, you can create and manage your data sets that you would like to publish to a product. Being able to package and provide your data sets into products requires a few steps to determine eligibility. For more information, visit the AWS Data Exchange User Guide.

                                                                      A data set is a collection of data that can be changed or updated over time. Data sets can be updated using revisions, which represent a new version or incremental change to a data set. A revision contains one or more assets. An asset in AWS Data Exchange is a piece of data that can be stored as an Amazon S3 object. The asset can be a structured data file, an image file, or some other data file. Jobs are asynchronous import or export operations used to create or copy assets.

                                                                      ", + "smithy.api#title": "AWS Data Exchange" + }, "version": "2017-07-25", "operations": [ { @@ -916,6 +1009,9 @@ { "target": "com.amazonaws.dataexchange#ListTagsForResource" }, + { + "target": "com.amazonaws.dataexchange#SendApiAsset" + }, { "target": "com.amazonaws.dataexchange#StartJob" }, @@ -937,22 +1033,7 @@ { "target": "com.amazonaws.dataexchange#UpdateRevision" } - ], - "traits": { - "aws.api#service": { - "sdkId": "DataExchange", - "arnNamespace": "dataexchange", - "cloudFormationName": "DataExchange", - "cloudTrailEventSource": "dataexchange.amazonaws.com", - "endpointPrefix": "dataexchange" - }, - "aws.auth#sigv4": { - "name": "dataexchange" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "

                                                                      AWS Data Exchange is a service that makes it easy for AWS customers to exchange data in the cloud. You can use the AWS Data Exchange APIs to create, update, manage, and access file-based data set in the AWS Cloud.

                                                                      As a subscriber, you can view and access the data sets that you have an entitlement to through a subscription. You can use the APIS to download or copy your entitled data sets to Amazon S3 for use across a variety of AWS analytics and machine learning services.

                                                                      As a provider, you can create and manage your data sets that you would like to publish to a product. Being able to package and provide your data sets into products requires a few steps to determine eligibility. For more information, visit the AWS Data Exchange User Guide.

                                                                      A data set is a collection of data that can be changed or updated over time. Data sets can be updated using revisions, which represent a new version or incremental change to a data set. A revision contains one or more assets. An asset in AWS Data Exchange is a piece of data that can be stored as an Amazon S3 object. The asset can be a structured data file, an image file, or some other data file. Jobs are asynchronous import or export operations used to create or copy assets.

                                                                      ", - "smithy.api#title": "AWS Data Exchange" - } + ] }, "com.amazonaws.dataexchange#DataSetEntry": { "type": "structure", @@ -1666,7 +1747,7 @@ "Name": { "target": "com.amazonaws.dataexchange#AssetName", "traits": { - "smithy.api#documentation": "

                                                                      The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.

                                                                      " } }, "RevisionId": { @@ -2103,6 +2184,158 @@ "smithy.api#documentation": "

                                                                      A unique identifier.

                                                                      " } }, + "com.amazonaws.dataexchange#ImportAssetFromApiGatewayApiRequestDetails": { + "type": "structure", + "members": { + "ApiDescription": { + "target": "com.amazonaws.dataexchange#ApiDescription", + "traits": { + "smithy.api#documentation": "

                                                                      The API description. Markdown supported.

                                                                      " + } + }, + "ApiId": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API Gateway API ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "ApiKey": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API Gateway API key.

                                                                      " + } + }, + "ApiName": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API name.

                                                                      ", + "smithy.api#required": {} + } + }, + "ApiSpecificationMd5Hash": { + "target": "com.amazonaws.dataexchange#__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093", + "traits": { + "smithy.api#documentation": "

                                                                      The Base64-encoded MD5 hash of the OpenAPI 3.0 JSON API specification file. It is used to ensure the integrity of the file.

                                                                      ", + "smithy.api#required": {} + } + }, + "DataSetId": { + "target": "com.amazonaws.dataexchange#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The data set ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "ProtocolType": { + "target": "com.amazonaws.dataexchange#ProtocolType", + "traits": { + "smithy.api#documentation": "

                                                                      The protocol type.

                                                                      ", + "smithy.api#required": {} + } + }, + "RevisionId": { + "target": "com.amazonaws.dataexchange#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The revision ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "Stage": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API stage.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request details.

                                                                      " + } + }, + "com.amazonaws.dataexchange#ImportAssetFromApiGatewayApiResponseDetails": { + "type": "structure", + "members": { + "ApiDescription": { + "target": "com.amazonaws.dataexchange#ApiDescription", + "traits": { + "smithy.api#documentation": "

                                                                      The API description.

                                                                      " + } + }, + "ApiId": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "ApiKey": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API key.

                                                                      " + } + }, + "ApiName": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API name.

                                                                      ", + "smithy.api#required": {} + } + }, + "ApiSpecificationMd5Hash": { + "target": "com.amazonaws.dataexchange#__stringMin24Max24PatternAZaZ094AZaZ092AZaZ093", + "traits": { + "smithy.api#documentation": "

                                                                      The Base64-encoded Md5 hash for the API asset, used to ensure the integrity of the API at that location.

                                                                      ", + "smithy.api#required": {} + } + }, + "ApiSpecificationUploadUrl": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The upload URL of the API specification.

                                                                      ", + "smithy.api#required": {} + } + }, + "ApiSpecificationUploadUrlExpiresAt": { + "target": "com.amazonaws.dataexchange#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the upload URL expires, in ISO 8601 format.

                                                                      ", + "smithy.api#required": {} + } + }, + "DataSetId": { + "target": "com.amazonaws.dataexchange#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The data set ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "ProtocolType": { + "target": "com.amazonaws.dataexchange#ProtocolType", + "traits": { + "smithy.api#documentation": "

                                                                      The protocol type.

                                                                      ", + "smithy.api#required": {} + } + }, + "RevisionId": { + "target": "com.amazonaws.dataexchange#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The revision ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "Stage": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The API stage.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The response details.

                                                                      " + } + }, "com.amazonaws.dataexchange#ImportAssetFromSignedUrlJobErrorDetails": { "type": "structure", "members": { @@ -2568,6 +2801,18 @@ { "value": "Amazon Redshift datashare assets per revision", "name": "Amazon_Redshift_datashare_assets_per_revision" + }, + { + "value": "Concurrent in progress jobs to import assets from an API Gateway API", + "name": "Concurrent_in_progress_jobs_to_import_assets_from_an_API_Gateway_API" + }, + { + "value": "Amazon API Gateway API assets per revision", + "name": "Amazon_API_Gateway_API_assets_per_revision" + }, + { + "value": "Revisions per Amazon API Gateway API data set", + "name": "Revisions_per_Amazon_API_Gateway_API_data_set" } ] } @@ -3170,6 +3415,17 @@ "smithy.api#documentation": "

@@ -3170,6 +3415,17 @@
"smithy.api#documentation": "

Information about the origin of the data set.

                                                                      " } }, + "com.amazonaws.dataexchange#ProtocolType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "REST", + "name": "REST" + } + ] + } + }, "com.amazonaws.dataexchange#RedshiftDataShareAsset": { "type": "structure", "members": { @@ -3238,6 +3494,12 @@ "traits": { "smithy.api#documentation": "

                                                                      Details from an import from Amazon Redshift datashare request.

                                                                      " } + }, + "ImportAssetFromApiGatewayApi": { + "target": "com.amazonaws.dataexchange#ImportAssetFromApiGatewayApiRequestDetails", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the import asset from API Gateway API request.

                                                                      " + } } }, "traits": { @@ -3338,6 +3600,12 @@ "traits": { "smithy.api#documentation": "

                                                                      Details from an import from Amazon Redshift datashare response.

                                                                      " } + }, + "ImportAssetFromApiGatewayApi": { + "target": "com.amazonaws.dataexchange#ImportAssetFromApiGatewayApiResponseDetails", + "traits": { + "smithy.api#documentation": "

                                                                      The response details.

                                                                      " + } } }, "traits": { @@ -3463,6 +3731,129 @@ "smithy.api#documentation": "

                                                                      The S3 object that is the asset.

                                                                      " } }, + "com.amazonaws.dataexchange#SendApiAsset": { + "type": "operation", + "input": { + "target": "com.amazonaws.dataexchange#SendApiAssetRequest" + }, + "output": { + "target": "com.amazonaws.dataexchange#SendApiAssetResponse" + }, + "errors": [ + { + "target": "com.amazonaws.dataexchange#AccessDeniedException" + }, + { + "target": "com.amazonaws.dataexchange#InternalServerException" + }, + { + "target": "com.amazonaws.dataexchange#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.dataexchange#ThrottlingException" + }, + { + "target": "com.amazonaws.dataexchange#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      This operation invokes an API Gateway API asset. The request is proxied to the provider’s API Gateway API.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api-fulfill." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/v1", + "code": 200 + } + } + }, + "com.amazonaws.dataexchange#SendApiAssetRequest": { + "type": "structure", + "members": { + "Body": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The request body.

                                                                      ", + "smithy.api#httpPayload": {} + } + }, + "QueryStringParameters": { + "target": "com.amazonaws.dataexchange#MapOf__string", + "traits": { + "smithy.api#documentation": "

                                                                      Attach query string parameters to the end of the URI (for example, /v1/examplePath?exampleParam=exampleValue).

                                                                      ", + "smithy.api#httpQueryParams": {} + } + }, + "AssetId": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      Asset ID value for the API request.

                                                                      ", + "smithy.api#httpHeader": "x-amzn-dataexchange-asset-id", + "smithy.api#required": {} + } + }, + "DataSetId": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      Data set ID value for the API request.

                                                                      ", + "smithy.api#httpHeader": "x-amzn-dataexchange-data-set-id", + "smithy.api#required": {} + } + }, + "RequestHeaders": { + "target": "com.amazonaws.dataexchange#MapOf__string", + "traits": { + "smithy.api#documentation": "

                                                                      Any header value prefixed with x-amzn-dataexchange-header- will have that stripped before sending the Asset API request. Use this when you want to override a header that AWS Data Exchange uses. Alternatively, you can use the header without a prefix to the HTTP request.

                                                                      ", + "smithy.api#httpPrefixHeaders": "x-amzn-dataexchange-header-" + } + }, + "Method": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      HTTP method value for the API request. Alternatively, you can use the appropriate verb in your request.

                                                                      ", + "smithy.api#httpHeader": "x-amzn-dataexchange-http-method" + } + }, + "Path": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      URI path value for the API request. Alternatively, you can set the URI path directly by invoking /v1/{pathValue}

                                                                      ", + "smithy.api#httpHeader": "x-amzn-dataexchange-path" + } + }, + "RevisionId": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      Revision ID value for the API request.

                                                                      ", + "smithy.api#httpHeader": "x-amzn-dataexchange-revision-id", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request body for SendApiAsset.

                                                                      " + } + }, + "com.amazonaws.dataexchange#SendApiAssetResponse": { + "type": "structure", + "members": { + "Body": { + "target": "com.amazonaws.dataexchange#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The response body from the underlying API tracked by the API asset.

                                                                      ", + "smithy.api#httpPayload": {} + } + }, + "ResponseHeaders": { + "target": "com.amazonaws.dataexchange#MapOf__string", + "traits": { + "smithy.api#documentation": "

The response headers from the underlying API tracked by the API asset.

", + "smithy.api#httpPrefixHeaders": "" + } + } + } + },

"com.amazonaws.dataexchange#ServerSideEncryptionTypes": { "type": "string", "traits": { @@ -3682,6 +4073,10 @@ { "value": "IMPORT_ASSETS_FROM_REDSHIFT_DATA_SHARES", "name": "IMPORT_ASSETS_FROM_REDSHIFT_DATA_SHARES" + }, + { + "value": "IMPORT_ASSET_FROM_API_GATEWAY_API", + "name": "IMPORT_ASSET_FROM_API_GATEWAY_API" } ] } }, @@ -3780,7 +4175,7 @@ "Name": { "target": "com.amazonaws.dataexchange#AssetName", "traits": { - "smithy.api#documentation": "

The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

", + "smithy.api#documentation": "

The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.

", "smithy.api#required": {} } }, @@ -3839,7 +4234,7 @@ "Name": { "target": "com.amazonaws.dataexchange#AssetName", "traits": { - "smithy.api#documentation": "

                                                                      The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key.

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the asset. When importing from Amazon S3, the S3 object key is used as the asset name. When exporting to Amazon S3, the asset name is used as default target S3 object key. When importing from Amazon API Gateway API, the API name is used as the asset name. When importing from Amazon Redshift, the datashare name is used as the asset name.

                                                                      " } }, "RevisionId": { diff --git a/codegen/sdk-codegen/aws-models/ec2.json b/codegen/sdk-codegen/aws-models/ec2.json index 5429bdfe3370..28466d926985 100644 --- a/codegen/sdk-codegen/aws-models/ec2.json +++ b/codegen/sdk-codegen/aws-models/ec2.json @@ -2137,6 +2137,9 @@ { "target": "com.amazonaws.ec2#DescribeSnapshots" }, + { + "target": "com.amazonaws.ec2#DescribeSnapshotTierStatus" + }, { "target": "com.amazonaws.ec2#DescribeSpotDatafeedSubscription" }, @@ -2467,6 +2470,9 @@ { "target": "com.amazonaws.ec2#ImportVolume" }, + { + "target": "com.amazonaws.ec2#ListSnapshotsInRecycleBin" + }, { "target": "com.amazonaws.ec2#ModifyAddressAttribute" }, @@ -2548,6 +2554,9 @@ { "target": "com.amazonaws.ec2#ModifySnapshotAttribute" }, + { + "target": "com.amazonaws.ec2#ModifySnapshotTier" + }, { "target": "com.amazonaws.ec2#ModifySpotFleetRequest" }, @@ -2719,6 +2728,12 @@ { "target": "com.amazonaws.ec2#RestoreManagedPrefixListVersion" }, + { + "target": "com.amazonaws.ec2#RestoreSnapshotFromRecycleBin" + }, + { + "target": "com.amazonaws.ec2#RestoreSnapshotTier" + }, { "target": "com.amazonaws.ec2#RevokeClientVpnIngress" }, @@ -24958,6 +24973,81 @@ } } }, + "com.amazonaws.ec2#DescribeSnapshotTierStatus": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#DescribeSnapshotTierStatusRequest" + }, + "output": { + "target": "com.amazonaws.ec2#DescribeSnapshotTierStatusResult" + }, + "traits": { + "smithy.api#documentation": "

                                                                      Describes the storage tier status of one or more Amazon EBS snapshots.

                                                                      ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "SnapshotTierStatuses", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ec2#DescribeSnapshotTierStatusMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.ec2#DescribeSnapshotTierStatusRequest": { + "type": "structure", + "members": { + "Filters": { + "target": "com.amazonaws.ec2#FilterList", + "traits": { + "smithy.api#documentation": "

The filters.

• snapshot-id - The snapshot ID.

• volume-id - The ID of the volume the snapshot is for.

• last-tiering-operation - The state of the last archive or restore action. (archiving | archival_error | archival_complete | restoring | restore_error | restore_complete)
                                                                      ", + "smithy.api#xmlName": "Filter" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

                                                                      The token for the next page of results.

                                                                      " + } + }, + "MaxResults": { + "target": "com.amazonaws.ec2#DescribeSnapshotTierStatusMaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return with a single call.\n\tTo retrieve the remaining results, make another call with the returned nextToken value.

                                                                      " + } + } + } + }, + "com.amazonaws.ec2#DescribeSnapshotTierStatusResult": { + "type": "structure", + "members": { + "SnapshotTierStatuses": { + "target": "com.amazonaws.ec2#snapshotTierStatusSet", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotTierStatusSet", + "smithy.api#documentation": "

                                                                      Information about the snapshot's storage tier.

                                                                      ", + "smithy.api#xmlName": "snapshotTierStatusSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "

The token to use to retrieve the next page of results. This value is null when there are no more results to return.

", "smithy.api#xmlName": "nextToken" } } } },

The filters.

• description - A description of the snapshot.

• encrypted - Indicates whether the snapshot is encrypted (true | false)

• owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is not the user-configured Amazon Web Services account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.

• owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the related parameter instead of this filter.

• progress - The progress of the snapshot, as a percentage (for example, 80%).

• snapshot-id - The snapshot ID.

• start-time - The time stamp when the snapshot was initiated.

• status - The status of the snapshot (pending | completed | error).

• tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

• tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

• volume-id - The ID of the volume the snapshot is for.

• volume-size - The size of the volume, in GiB.
                                                                      ", + "smithy.api#documentation": "

The filters.

• description - A description of the snapshot.

• encrypted - Indicates whether the snapshot is encrypted (true | false)

• owner-alias - The owner alias, from an Amazon-maintained list (amazon). This is not the user-configured Amazon Web Services account alias set using the IAM console. We recommend that you use the related parameter instead of this filter.

• owner-id - The Amazon Web Services account ID of the owner. We recommend that you use the related parameter instead of this filter.

• progress - The progress of the snapshot, as a percentage (for example, 80%).

• snapshot-id - The snapshot ID.

• start-time - The time stamp when the snapshot was initiated.

• status - The status of the snapshot (pending | completed | error).

• storage-tier - The storage tier of the snapshot (archive | standard).

• tag: - The key/value combination of a tag assigned to the resource. Use the tag key in the filter name and the tag value as the filter value. For example, to find all resources that have a tag with the key Owner and the value TeamA, specify tag:Owner for the filter name and TeamA for the filter value.

• tag-key - The key of a tag assigned to the resource. Use this filter to find all resources assigned a tag with a specific key, regardless of the tag value.

• volume-id - The ID of the volume the snapshot is for.

• volume-size - The size of the volume, in GiB.
                                                                      ", "smithy.api#xmlName": "Filter" } }, @@ -43689,6 +43779,46 @@ "value": "m6gd.16xlarge", "name": "m6gd_16xlarge" }, + { + "value": "m6a.large", + "name": "m6a_large" + }, + { + "value": "m6a.xlarge", + "name": "m6a_xlarge" + }, + { + "value": "m6a.2xlarge", + "name": "m6a_2xlarge" + }, + { + "value": "m6a.4xlarge", + "name": "m6a_4xlarge" + }, + { + "value": "m6a.8xlarge", + "name": "m6a_8xlarge" + }, + { + "value": "m6a.12xlarge", + "name": "m6a_12xlarge" + }, + { + "value": "m6a.16xlarge", + "name": "m6a_16xlarge" + }, + { + "value": "m6a.24xlarge", + "name": "m6a_24xlarge" + }, + { + "value": "m6a.32xlarge", + "name": "m6a_32xlarge" + }, + { + "value": "m6a.48xlarge", + "name": "m6a_48xlarge" + }, { "value": "m6i.large", "name": "m6i_large" @@ -43777,6 +43907,78 @@ "value": "vt1.24xlarge", "name": "vt1_24xlarge" }, + { + "value": "im4gn.16xlarge", + "name": "im4gn_16xlarge" + }, + { + "value": "im4gn.2xlarge", + "name": "im4gn_2xlarge" + }, + { + "value": "im4gn.4xlarge", + "name": "im4gn_4xlarge" + }, + { + "value": "im4gn.8xlarge", + "name": "im4gn_8xlarge" + }, + { + "value": "im4gn.large", + "name": "im4gn_large" + }, + { + "value": "im4gn.xlarge", + "name": "im4gn_xlarge" + }, + { + "value": "is4gen.2xlarge", + "name": "is4gen_2xlarge" + }, + { + "value": "is4gen.4xlarge", + "name": "is4gen_4xlarge" + }, + { + "value": "is4gen.8xlarge", + "name": "is4gen_8xlarge" + }, + { + "value": "is4gen.large", + "name": "is4gen_large" + }, + { + "value": "is4gen.medium", + "name": "is4gen_medium" + }, + { + "value": "is4gen.xlarge", + "name": "is4gen_xlarge" + }, + { + "value": "g5g.xlarge", + "name": "g5g_xlarge" + }, + { + "value": "g5g.2xlarge", + "name": "g5g_2xlarge" + }, + { + "value": "g5g.4xlarge", + "name": "g5g_4xlarge" + }, + { + "value": "g5g.8xlarge", + "name": "g5g_8xlarge" + }, + { + "value": "g5g.16xlarge", + "name": "g5g_16xlarge" + }, + { + "value": "g5g.metal", + "name": "g5g_metal" + }, { "value": "g5.xlarge", "name": "g5_xlarge" @@ -46970,6 +47172,85 @@ } } }, + "com.amazonaws.ec2#ListSnapshotsInRecycleBin": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#ListSnapshotsInRecycleBinRequest" + }, + "output": { + "target": "com.amazonaws.ec2#ListSnapshotsInRecycleBinResult" + }, + "traits": { + "smithy.api#documentation": "

                                                                      Lists one or more snapshots that are currently in the Recycle Bin.
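A minimal sketch of calling this new operation through @aws-sdk/client-ec2; the region is a placeholder, and omitting SnapshotIds lists everything currently in the Recycle Bin, as described above:

```ts
import { EC2Client, ListSnapshotsInRecycleBinCommand } from "@aws-sdk/client-ec2";

async function showRecycleBinSnapshots(): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" }); // placeholder region

  // No SnapshotIds supplied, so all snapshots in the Recycle Bin are returned.
  const { Snapshots } = await client.send(new ListSnapshotsInRecycleBinCommand({}));

  for (const snapshot of Snapshots ?? []) {
    console.log(snapshot.SnapshotId, snapshot.RecycleBinEnterTime, snapshot.RecycleBinExitTime);
  }
}

showRecycleBinSnapshots().catch(console.error);
```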

                                                                      ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Snapshots", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.ec2#ListSnapshotsInRecycleBinMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 5, + "max": 1000 + } + } + }, + "com.amazonaws.ec2#ListSnapshotsInRecycleBinRequest": { + "type": "structure", + "members": { + "MaxResults": { + "target": "com.amazonaws.ec2#ListSnapshotsInRecycleBinMaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return with a single call.\n\tTo retrieve the remaining results, make another call with the returned nextToken value.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "smithy.api#documentation": "

                                                                      The token for the next page of results.

                                                                      " + } + }, + "SnapshotIds": { + "target": "com.amazonaws.ec2#SnapshotIdStringList", + "traits": { + "smithy.api#documentation": "

                                                                      The IDs of the snapshots to list. Omit this parameter to list all of the \n snapshots that are in the Recycle Bin.

                                                                      ", + "smithy.api#xmlName": "SnapshotId" + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

                                                                      " + } + } + } + }, + "com.amazonaws.ec2#ListSnapshotsInRecycleBinResult": { + "type": "structure", + "members": { + "Snapshots": { + "target": "com.amazonaws.ec2#SnapshotRecycleBinInfoList", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotSet", + "smithy.api#documentation": "

                                                                      Information about the snapshots.

                                                                      ", + "smithy.api#xmlName": "snapshotSet" + } + }, + "NextToken": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "NextToken", + "smithy.api#documentation": "

                                                                      The token to use to retrieve the next page of results. This value is null when there are no more results to return.

                                                                      ", + "smithy.api#xmlName": "nextToken" + } + } + } + }, "com.amazonaws.ec2#ListingState": { "type": "string", "traits": { @@ -49931,6 +50212,63 @@ } } }, + "com.amazonaws.ec2#ModifySnapshotTier": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#ModifySnapshotTierRequest" + }, + "output": { + "target": "com.amazonaws.ec2#ModifySnapshotTierResult" + }, + "traits": { + "smithy.api#documentation": "

                                                                      Archives an Amazon EBS snapshot. When you archive a snapshot, it is converted to a full \n snapshot that includes all of the blocks of data that were written to the volume at the \n time the snapshot was created, and moved from the standard tier to the archive \n tier. For more information, see Archive Amazon EBS snapshots \n in the Amazon Elastic Compute Cloud User Guide.
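A short sketch of archiving a snapshot with the new command; the snapshot ID and region below are placeholders, and archive is the only storage tier the request documentation allows:

```ts
import { EC2Client, ModifySnapshotTierCommand } from "@aws-sdk/client-ec2";

async function archiveSnapshot(snapshotId: string): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" }); // placeholder region

  const { TieringStartTime } = await client.send(
    new ModifySnapshotTierCommand({
      SnapshotId: snapshotId,
      StorageTier: "archive", // per the StorageTier member, archive is the only valid value
    })
  );
  console.log(`Archive started at ${TieringStartTime}`);
}

archiveSnapshot("snap-0123456789abcdef0").catch(console.error); // placeholder snapshot ID
```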

                                                                      " + } + }, + "com.amazonaws.ec2#ModifySnapshotTierRequest": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.ec2#SnapshotId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the snapshot.

                                                                      ", + "smithy.api#required": {} + } + }, + "StorageTier": { + "target": "com.amazonaws.ec2#TargetStorageTier", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the storage tier. You must specify archive.

                                                                      " + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

                                                                      " + } + } + } + }, + "com.amazonaws.ec2#ModifySnapshotTierResult": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotId", + "smithy.api#documentation": "

                                                                      The ID of the snapshot.

                                                                      ", + "smithy.api#xmlName": "snapshotId" + } + }, + "TieringStartTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "TieringStartTime", + "smithy.api#documentation": "

                                                                      The date and time when the archive process was started.

                                                                      ", + "smithy.api#xmlName": "tieringStartTime" + } + } + } + }, "com.amazonaws.ec2#ModifySpotFleetRequest": { "type": "operation", "input": { @@ -50017,7 +50355,7 @@ "target": "com.amazonaws.ec2#ModifySubnetAttributeRequest" }, "traits": { - "smithy.api#documentation": "

                                                                      Modifies a subnet attribute. You can only modify one attribute at a time.

                                                                      " + "smithy.api#documentation": "

                                                                      Modifies a subnet attribute. You can only modify one attribute at a time.

Use this action to modify subnets on Amazon Web Services Outposts.

• To modify a subnet on an Outpost rack, set both MapCustomerOwnedIpOnLaunch and CustomerOwnedIpv4Pool. These two parameters act as a single attribute.

• To modify a subnet on an Outpost server, set either EnableLniAtDeviceIndex or DisableLniAtDeviceIndex.

For more information about Amazon Web Services Outposts, see the following:
                                                                      \n\t \n\t " } }, "com.amazonaws.ec2#ModifySubnetAttributeRequest": { @@ -50079,6 +50417,18 @@ "traits": { "smithy.api#documentation": "

                                                                      Indicates whether to respond to DNS queries for instance hostnames with DNS AAAA records.

                                                                      " } + }, + "EnableLniAtDeviceIndex": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "smithy.api#documentation": "

Indicates the device position for local network interfaces in this subnet. For example, 1 indicates that local network interfaces in this subnet are the secondary network interface (eth1). A local network interface cannot be the primary network interface (eth0).

                                                                      " + } + }, + "DisableLniAtDeviceIndex": { + "target": "com.amazonaws.ec2#AttributeBooleanValue", + "traits": { + "smithy.api#documentation": "

                                                                      \n Specify true to indicate that local network interfaces at the current \n position should be disabled. \n
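The two new members above surface on ModifySubnetAttributeCommand. A sketch of enabling local network interfaces at device index 1 on an Outpost server subnet; the subnet ID and region are placeholders, and SubnetId is assumed to be the operation's usual required identifier:

```ts
import { EC2Client, ModifySubnetAttributeCommand } from "@aws-sdk/client-ec2";

async function enableLniOnSubnet(subnetId: string): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" }); // placeholder region

  // Only one subnet attribute can be modified per call; here we set the LNI device index.
  await client.send(
    new ModifySubnetAttributeCommand({
      SubnetId: subnetId,
      EnableLniAtDeviceIndex: 1, // eth1, the secondary network interface
    })
  );
}

enableLniOnSubnet("subnet-0123456789abcdef0").catch(console.error); // placeholder subnet ID
```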

                                                                      " + } } } }, @@ -59925,6 +60275,206 @@ } } }, + "com.amazonaws.ec2#RestoreSnapshotFromRecycleBin": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#RestoreSnapshotFromRecycleBinRequest" + }, + "output": { + "target": "com.amazonaws.ec2#RestoreSnapshotFromRecycleBinResult" + }, + "traits": { + "smithy.api#documentation": "

                                                                      Restores a snapshot from the Recycle Bin. For more information, see Restore \n snapshots from the Recycle Bin in the Amazon Elastic Compute Cloud User Guide.
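A minimal sketch of restoring a snapshot out of the Recycle Bin with the new command (snapshot ID and region are placeholders):

```ts
import { EC2Client, RestoreSnapshotFromRecycleBinCommand } from "@aws-sdk/client-ec2";

async function restoreFromRecycleBin(snapshotId: string): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" }); // placeholder region

  const result = await client.send(
    new RestoreSnapshotFromRecycleBinCommand({ SnapshotId: snapshotId })
  );
  console.log(`Restored ${result.SnapshotId}, state: ${result.State}`);
}

restoreFromRecycleBin("snap-0123456789abcdef0").catch(console.error); // placeholder snapshot ID
```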

                                                                      " + } + }, + "com.amazonaws.ec2#RestoreSnapshotFromRecycleBinRequest": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.ec2#SnapshotId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the snapshot to restore.

                                                                      ", + "smithy.api#required": {} + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

                                                                      " + } + } + } + }, + "com.amazonaws.ec2#RestoreSnapshotFromRecycleBinResult": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotId", + "smithy.api#documentation": "

                                                                      The ID of the snapshot.

                                                                      ", + "smithy.api#xmlName": "snapshotId" + } + }, + "OutpostArn": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "OutpostArn", + "smithy.api#documentation": "

                                                                      The ARN of the Outpost on which the snapshot is stored. For more information, see Amazon EBS local snapshots on Outposts in the \n Amazon Elastic Compute Cloud User Guide.

                                                                      ", + "smithy.api#xmlName": "outpostArn" + } + }, + "Description": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

                                                                      The description for the snapshot.

                                                                      ", + "smithy.api#xmlName": "description" + } + }, + "Encrypted": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "Encrypted", + "smithy.api#documentation": "

                                                                      Indicates whether the snapshot is encrypted.

                                                                      ", + "smithy.api#xmlName": "encrypted" + } + }, + "OwnerId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "OwnerId", + "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account that owns the EBS snapshot.

                                                                      ", + "smithy.api#xmlName": "ownerId" + } + }, + "Progress": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Progress", + "smithy.api#documentation": "

                                                                      The progress of the snapshot, as a percentage.

                                                                      ", + "smithy.api#xmlName": "progress" + } + }, + "StartTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "StartTime", + "smithy.api#documentation": "

                                                                      The time stamp when the snapshot was initiated.

                                                                      ", + "smithy.api#xmlName": "startTime" + } + }, + "State": { + "target": "com.amazonaws.ec2#SnapshotState", + "traits": { + "aws.protocols#ec2QueryName": "Status", + "smithy.api#documentation": "

                                                                      The state of the snapshot.

                                                                      ", + "smithy.api#xmlName": "status" + } + }, + "VolumeId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VolumeId", + "smithy.api#documentation": "

                                                                      The ID of the volume that was used to create the snapshot.

                                                                      ", + "smithy.api#xmlName": "volumeId" + } + }, + "VolumeSize": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "VolumeSize", + "smithy.api#documentation": "

                                                                      The size of the volume, in GiB.

                                                                      ", + "smithy.api#xmlName": "volumeSize" + } + } + } + }, + "com.amazonaws.ec2#RestoreSnapshotTier": { + "type": "operation", + "input": { + "target": "com.amazonaws.ec2#RestoreSnapshotTierRequest" + }, + "output": { + "target": "com.amazonaws.ec2#RestoreSnapshotTierResult" + }, + "traits": { + "smithy.api#documentation": "

                                                                      Restores an archived Amazon EBS snapshot for use temporarily or permanently, or modifies the restore \n period or restore type for a snapshot that was previously temporarily restored.

                                                                      \n \n

For more information, see \n Restore an archived snapshot and \n modify the restore period or restore type for a temporarily restored snapshot in the Amazon Elastic Compute Cloud User Guide.

                                                                      " + } + }, + "com.amazonaws.ec2#RestoreSnapshotTierRequest": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.ec2#SnapshotId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the snapshot to restore.

                                                                      ", + "smithy.api#required": {} + } + }, + "TemporaryRestoreDays": { + "target": "com.amazonaws.ec2#RestoreSnapshotTierRequestTemporaryRestoreDays", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the number of days for which to temporarily restore an archived snapshot. \n Required for temporary restores only. The snapshot will be automatically re-archived \n after this period.

                                                                      \n

                                                                      To temporarily restore an archived snapshot, specify the number of days and omit \n the PermanentRestore parameter or set it to \n false.

                                                                      " + } + }, + "PermanentRestore": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Indicates whether to permanently restore an archived snapshot. To permanently restore \n an archived snapshot, specify true and omit the \n RestoreSnapshotTierRequest$TemporaryRestoreDays parameter.
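Putting the request members above together, a sketch of a temporary restore from the archive tier; the snapshot ID, region, and 10-day window are placeholders:

```ts
import { EC2Client, RestoreSnapshotTierCommand } from "@aws-sdk/client-ec2";

async function temporarilyRestoreArchivedSnapshot(snapshotId: string): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" }); // placeholder region

  const result = await client.send(
    new RestoreSnapshotTierCommand({
      SnapshotId: snapshotId,
      TemporaryRestoreDays: 10, // re-archived automatically after 10 days
      // Omitting PermanentRestore (or setting it to false) keeps the restore temporary.
    })
  );
  console.log(result.RestoreStartTime, result.RestoreDuration, result.IsPermanentRestore);
}

temporarilyRestoreArchivedSnapshot("snap-0123456789abcdef0").catch(console.error); // placeholder ID
```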

                                                                      " + } + }, + "DryRun": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Checks whether you have the required permissions for the action, without actually making the request, \n and provides an error response. If you have the required permissions, the error response is DryRunOperation. \n Otherwise, it is UnauthorizedOperation.

                                                                      " + } + } + } + }, + "com.amazonaws.ec2#RestoreSnapshotTierRequestTemporaryRestoreDays": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.ec2#RestoreSnapshotTierResult": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotId", + "smithy.api#documentation": "

                                                                      The ID of the snapshot.

                                                                      ", + "smithy.api#xmlName": "snapshotId" + } + }, + "RestoreStartTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "RestoreStartTime", + "smithy.api#documentation": "

                                                                      The date and time when the snapshot restore process started.

                                                                      ", + "smithy.api#xmlName": "restoreStartTime" + } + }, + "RestoreDuration": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "RestoreDuration", + "smithy.api#documentation": "

                                                                      For temporary restores only. The number of days for which the archived snapshot \n is temporarily restored.

                                                                      ", + "smithy.api#xmlName": "restoreDuration" + } + }, + "IsPermanentRestore": { + "target": "com.amazonaws.ec2#Boolean", + "traits": { + "aws.protocols#ec2QueryName": "IsPermanentRestore", + "smithy.api#documentation": "

                                                                      Indicates whether the snapshot is permanently restored. true indicates a permanent \n restore. false indicates a temporary restore.

                                                                      ", + "smithy.api#xmlName": "isPermanentRestore" + } + } + } + }, "com.amazonaws.ec2#ResultRange": { "type": "integer", "traits": { @@ -62989,6 +63539,22 @@ "smithy.api#documentation": "

                                                                      Any tags assigned to the snapshot.

                                                                      ", "smithy.api#xmlName": "tagSet" } + }, + "StorageTier": { + "target": "com.amazonaws.ec2#StorageTier", + "traits": { + "aws.protocols#ec2QueryName": "StorageTier", + "smithy.api#documentation": "

                                                                      The storage tier in which the snapshot is stored. standard indicates \n that the snapshot is stored in the standard snapshot storage tier and that it is ready \n for use. archive indicates that the snapshot is currently archived and that \n it must be restored before it can be used.

                                                                      ", + "smithy.api#xmlName": "storageTier" + } + }, + "RestoreExpiryTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "RestoreExpiryTime", + "smithy.api#documentation": "

                                                                      Only for archived snapshots that are temporarily restored. Indicates the date and \n time when a temporarily restored snapshot will be automatically re-archived.
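These two members are now returned on Snapshot objects. A sketch of combining them with the new storage-tier filter on DescribeSnapshots; the region is a placeholder:

```ts
import { EC2Client, DescribeSnapshotsCommand } from "@aws-sdk/client-ec2";

async function listArchivedSnapshots(): Promise<void> {
  const client = new EC2Client({ region: "us-east-1" }); // placeholder region

  const { Snapshots } = await client.send(
    new DescribeSnapshotsCommand({
      OwnerIds: ["self"],
      Filters: [{ Name: "storage-tier", Values: ["archive"] }], // filter added in this revision
    })
  );

  for (const snapshot of Snapshots ?? []) {
    console.log(snapshot.SnapshotId, snapshot.StorageTier, snapshot.RestoreExpiryTime);
  }
}

listArchivedSnapshots().catch(console.error);
```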

                                                                      ", + "smithy.api#xmlName": "restoreExpiryTime" + } } }, "traits": { @@ -63256,6 +63822,63 @@ } } }, + "com.amazonaws.ec2#SnapshotRecycleBinInfo": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotId", + "smithy.api#documentation": "

                                                                      The ID of the snapshot.

                                                                      ", + "smithy.api#xmlName": "snapshotId" + } + }, + "RecycleBinEnterTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "RecycleBinEnterTime", + "smithy.api#documentation": "

The date and time when the snapshot entered the Recycle Bin.

                                                                      ", + "smithy.api#xmlName": "recycleBinEnterTime" + } + }, + "RecycleBinExitTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "RecycleBinExitTime", + "smithy.api#documentation": "

                                                                      The date and time when the snapshot is to be permanently deleted from the Recycle Bin.

                                                                      ", + "smithy.api#xmlName": "recycleBinExitTime" + } + }, + "Description": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "Description", + "smithy.api#documentation": "

                                                                      The description for the snapshot.

                                                                      ", + "smithy.api#xmlName": "description" + } + }, + "VolumeId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "VolumeId", + "smithy.api#documentation": "

                                                                      The ID of the volume from which the snapshot was created.

                                                                      ", + "smithy.api#xmlName": "volumeId" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about a snapshot that is currently in the Recycle Bin.

                                                                      " + } + }, + "com.amazonaws.ec2#SnapshotRecycleBinInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#SnapshotRecycleBinInfo", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#SnapshotSet": { "type": "list", "member": { @@ -63280,6 +63903,14 @@ { "value": "error", "name": "error" + }, + { + "value": "recoverable", + "name": "recoverable" + }, + { + "value": "recovering", + "name": "recovering" } ] } @@ -63380,6 +64011,110 @@ "smithy.api#documentation": "

                                                                      Details about the import snapshot task.

                                                                      " } }, + "com.amazonaws.ec2#SnapshotTierStatus": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.ec2#SnapshotId", + "traits": { + "aws.protocols#ec2QueryName": "SnapshotId", + "smithy.api#documentation": "

                                                                      The ID of the snapshot.

                                                                      ", + "smithy.api#xmlName": "snapshotId" + } + }, + "VolumeId": { + "target": "com.amazonaws.ec2#VolumeId", + "traits": { + "aws.protocols#ec2QueryName": "VolumeId", + "smithy.api#documentation": "

                                                                      The ID of the volume from which the snapshot was created.

                                                                      ", + "smithy.api#xmlName": "volumeId" + } + }, + "Status": { + "target": "com.amazonaws.ec2#SnapshotState", + "traits": { + "aws.protocols#ec2QueryName": "Status", + "smithy.api#documentation": "

                                                                      The state of the snapshot.

                                                                      ", + "smithy.api#xmlName": "status" + } + }, + "OwnerId": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "OwnerId", + "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account that owns the snapshot.

                                                                      ", + "smithy.api#xmlName": "ownerId" + } + }, + "Tags": { + "target": "com.amazonaws.ec2#TagList", + "traits": { + "aws.protocols#ec2QueryName": "TagSet", + "smithy.api#documentation": "

                                                                      The tags that are assigned to the snapshot.

                                                                      ", + "smithy.api#xmlName": "tagSet" + } + }, + "StorageTier": { + "target": "com.amazonaws.ec2#StorageTier", + "traits": { + "aws.protocols#ec2QueryName": "StorageTier", + "smithy.api#documentation": "

                                                                      The storage tier in which the snapshot is stored. standard indicates \n that the snapshot is stored in the standard snapshot storage tier and that it is ready \n for use. archive indicates that the snapshot is currently archived and that \n it must be restored before it can be used.

                                                                      ", + "smithy.api#xmlName": "storageTier" + } + }, + "LastTieringStartTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "LastTieringStartTime", + "smithy.api#documentation": "

                                                                      The date and time when the last archive or restore process was started.

                                                                      ", + "smithy.api#xmlName": "lastTieringStartTime" + } + }, + "LastTieringProgress": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "LastTieringProgress", + "smithy.api#documentation": "

                                                                      The progress of the last archive or restore process, as a percentage.

                                                                      ", + "smithy.api#xmlName": "lastTieringProgress" + } + }, + "LastTieringOperationStatus": { + "target": "com.amazonaws.ec2#TieringOperationStatus", + "traits": { + "aws.protocols#ec2QueryName": "LastTieringOperationStatus", + "smithy.api#documentation": "

                                                                      The status of the last archive or restore process.

                                                                      ", + "smithy.api#xmlName": "lastTieringOperationStatus" + } + }, + "LastTieringOperationStatusDetail": { + "target": "com.amazonaws.ec2#String", + "traits": { + "aws.protocols#ec2QueryName": "LastTieringOperationStatusDetail", + "smithy.api#documentation": "

                                                                      A message describing the status of the last archive or restore process.

                                                                      ", + "smithy.api#xmlName": "lastTieringOperationStatusDetail" + } + }, + "ArchivalCompleteTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "ArchivalCompleteTime", + "smithy.api#documentation": "

                                                                      The date and time when the last archive process was completed.

                                                                      ", + "smithy.api#xmlName": "archivalCompleteTime" + } + }, + "RestoreExpiryTime": { + "target": "com.amazonaws.ec2#MillisecondDateTime", + "traits": { + "aws.protocols#ec2QueryName": "RestoreExpiryTime", + "smithy.api#documentation": "

                                                                      Only for archived snapshots that are temporarily restored. Indicates the date and \n time when a temporarily restored snapshot will be automatically re-archived.

                                                                      ", + "smithy.api#xmlName": "restoreExpiryTime" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Provides information about a snapshot's storage tier.

                                                                      " + } + }, "com.amazonaws.ec2#SpotAllocationStrategy": { "type": "string", "traits": { @@ -65124,6 +65859,21 @@ "smithy.api#documentation": "

                                                                      Describes a storage location in Amazon S3.

                                                                      " } }, + "com.amazonaws.ec2#StorageTier": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "archive", + "name": "archive" + }, + { + "value": "standard", + "name": "standard" + } + ] + } + }, "com.amazonaws.ec2#StoreImageTaskResult": { "type": "structure", "members": { @@ -65252,6 +66002,14 @@ "smithy.api#xmlName": "defaultForAz" } }, + "EnableLniAtDeviceIndex": { + "target": "com.amazonaws.ec2#Integer", + "traits": { + "aws.protocols#ec2QueryName": "EnableLniAtDeviceIndex", + "smithy.api#documentation": "

Indicates the device position for local network interfaces in this subnet. For example, 1 indicates that local network interfaces in this subnet are the secondary network interface (eth1).

                                                                      ", + "smithy.api#xmlName": "enableLniAtDeviceIndex" + } + }, "MapPublicIpOnLaunch": { "target": "com.amazonaws.ec2#Boolean", "traits": { @@ -66135,6 +66893,17 @@ } } }, + "com.amazonaws.ec2#TargetStorageTier": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "archive", + "name": "archive" + } + ] + } + }, "com.amazonaws.ec2#TelemetryStatus": { "type": "string", "traits": { @@ -66342,6 +67111,49 @@ } } }, + "com.amazonaws.ec2#TieringOperationStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "archival-in-progress", + "name": "archival_in_progress" + }, + { + "value": "archival-completed", + "name": "archival_completed" + }, + { + "value": "archival-failed", + "name": "archival_failed" + }, + { + "value": "temporary-restore-in-progress", + "name": "temporary_restore_in_progress" + }, + { + "value": "temporary-restore-completed", + "name": "temporary_restore_completed" + }, + { + "value": "temporary-restore-failed", + "name": "temporary_restore_failed" + }, + { + "value": "permanent-restore-in-progress", + "name": "permanent_restore_in_progress" + }, + { + "value": "permanent-restore-completed", + "name": "permanent_restore_completed" + }, + { + "value": "permanent-restore-failed", + "name": "permanent_restore_failed" + } + ] + } + }, "com.amazonaws.ec2#TotalLocalStorageGB": { "type": "structure", "members": { @@ -72863,6 +73675,15 @@ ] } }, + "com.amazonaws.ec2#snapshotTierStatusSet": { + "type": "list", + "member": { + "target": "com.amazonaws.ec2#SnapshotTierStatus", + "traits": { + "smithy.api#xmlName": "item" + } + } + }, "com.amazonaws.ec2#totalFpgaMemory": { "type": "integer", "traits": { diff --git a/codegen/sdk-codegen/aws-models/ecr.json b/codegen/sdk-codegen/aws-models/ecr.json index 04327368459b..fccc5d863834 100644 --- a/codegen/sdk-codegen/aws-models/ecr.json +++ b/codegen/sdk-codegen/aws-models/ecr.json @@ -31,6 +31,24 @@ "shapes": { "com.amazonaws.ecr#AmazonEC2ContainerRegistry_V20150921": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "ECR", + "arnNamespace": "ecr", + "cloudFormationName": "ECR", + "cloudTrailEventSource": "ecr.amazonaws.com", + "endpointPrefix": "api.ecr" + }, + "aws.auth#sigv4": { + "name": "ecr" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "Amazon Elastic Container Registry\n

                                                                      Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the\n familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR\n provides a secure, scalable, and reliable registry for your Docker or Open Container\n Initiative (OCI) images. Amazon ECR supports private repositories with resource-based\n permissions using IAM so that specific users or Amazon EC2 instances can access\n repositories and images.

                                                                      \n

                                                                      Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the\n Amazon Web Services General Reference.

                                                                      ", + "smithy.api#title": "Amazon EC2 Container Registry", + "smithy.api#xmlNamespace": { + "uri": "http://ecr.amazonaws.com/doc/2015-09-21/" + } + }, "version": "2015-09-21", "operations": [ { @@ -42,15 +60,24 @@ { "target": "com.amazonaws.ecr#BatchGetImage" }, + { + "target": "com.amazonaws.ecr#BatchGetRepositoryScanningConfiguration" + }, { "target": "com.amazonaws.ecr#CompleteLayerUpload" }, + { + "target": "com.amazonaws.ecr#CreatePullThroughCacheRule" + }, { "target": "com.amazonaws.ecr#CreateRepository" }, { "target": "com.amazonaws.ecr#DeleteLifecyclePolicy" }, + { + "target": "com.amazonaws.ecr#DeletePullThroughCacheRule" + }, { "target": "com.amazonaws.ecr#DeleteRegistryPolicy" }, @@ -69,6 +96,9 @@ { "target": "com.amazonaws.ecr#DescribeImageScanFindings" }, + { + "target": "com.amazonaws.ecr#DescribePullThroughCacheRules" + }, { "target": "com.amazonaws.ecr#DescribeRegistry" }, @@ -90,6 +120,9 @@ { "target": "com.amazonaws.ecr#GetRegistryPolicy" }, + { + "target": "com.amazonaws.ecr#GetRegistryScanningConfiguration" + }, { "target": "com.amazonaws.ecr#GetRepositoryPolicy" }, @@ -117,6 +150,9 @@ { "target": "com.amazonaws.ecr#PutRegistryPolicy" }, + { + "target": "com.amazonaws.ecr#PutRegistryScanningConfiguration" + }, { "target": "com.amazonaws.ecr#PutReplicationConfiguration" }, @@ -138,25 +174,10 @@ { "target": "com.amazonaws.ecr#UploadLayerPart" } - ], - "traits": { - "aws.api#service": { - "sdkId": "ECR", - "arnNamespace": "ecr", - "cloudFormationName": "ECR", - "cloudTrailEventSource": "ecr.amazonaws.com", - "endpointPrefix": "api.ecr" - }, - "aws.auth#sigv4": { - "name": "ecr" - }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Elastic Container Registry\n

                                                                      Amazon Elastic Container Registry (Amazon ECR) is a managed container image registry service. Customers can use the\n familiar Docker CLI, or their preferred client, to push, pull, and manage images. Amazon ECR\n provides a secure, scalable, and reliable registry for your Docker or Open Container\n Initiative (OCI) images. Amazon ECR supports private repositories with resource-based\n permissions using IAM so that specific users or Amazon EC2 instances can access\n repositories and images.

                                                                      \n

                                                                      Amazon ECR has service endpoints in each supported Region. For more information, see Amazon ECR endpoints in the\n Amazon Web Services General Reference.

                                                                      ", - "smithy.api#title": "Amazon EC2 Container Registry", - "smithy.api#xmlNamespace": { - "uri": "http://ecr.amazonaws.com/doc/2015-09-21/" - } - } + ] + }, + "com.amazonaws.ecr#Arch": { + "type": "string" }, "com.amazonaws.ecr#Arn": { "type": "string" @@ -212,6 +233,9 @@ } } }, + "com.amazonaws.ecr#Author": { + "type": "string" + }, "com.amazonaws.ecr#AuthorizationData": { "type": "structure", "members": { @@ -244,12 +268,71 @@ "target": "com.amazonaws.ecr#AuthorizationData" } }, + "com.amazonaws.ecr#AwsEcrContainerImageDetails": { + "type": "structure", + "members": { + "architecture": { + "target": "com.amazonaws.ecr#Arch", + "traits": { + "smithy.api#documentation": "

                                                                      The architecture of the Amazon ECR container image.

                                                                      " + } + }, + "author": { + "target": "com.amazonaws.ecr#Author", + "traits": { + "smithy.api#documentation": "

                                                                      The image author of the Amazon ECR container image.

                                                                      " + } + }, + "imageHash": { + "target": "com.amazonaws.ecr#ImageDigest", + "traits": { + "smithy.api#documentation": "

                                                                      The image hash of the Amazon ECR container image.

                                                                      " + } + }, + "imageTags": { + "target": "com.amazonaws.ecr#ImageTagsList", + "traits": { + "smithy.api#documentation": "

                                                                      The image tags attached to the Amazon ECR container image.

                                                                      " + } + }, + "platform": { + "target": "com.amazonaws.ecr#Platform", + "traits": { + "smithy.api#documentation": "

                                                                      The platform of the Amazon ECR container image.

                                                                      " + } + }, + "pushedAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time the Amazon ECR container image was pushed.

                                                                      " + } + }, + "registry": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The registry the Amazon ECR container image belongs to.

                                                                      " + } + }, + "repositoryName": { + "target": "com.amazonaws.ecr#RepositoryName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the repository the Amazon ECR container image resides in.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The image details of the Amazon ECR container image.

                                                                      " + } + }, "com.amazonaws.ecr#Base64": { "type": "string", "traits": { "smithy.api#pattern": "^\\S+$" } }, + "com.amazonaws.ecr#BaseScore": { + "type": "double" + }, "com.amazonaws.ecr#BatchCheckLayerAvailability": { "type": "operation", "input": { @@ -454,6 +537,61 @@ } } }, + "com.amazonaws.ecr#BatchGetRepositoryScanningConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#BatchGetRepositoryScanningConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.ecr#BatchGetRepositoryScanningConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#RepositoryNotFoundException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the scanning configuration for one or more repositories.
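A sketch of fetching the scanning configuration for a few repositories with the v3 ECR client; the repository names and region are placeholders:

```ts
import {
  ECRClient,
  BatchGetRepositoryScanningConfigurationCommand,
} from "@aws-sdk/client-ecr";

async function showScanningConfigurations(): Promise<void> {
  const client = new ECRClient({ region: "us-east-1" }); // placeholder region

  const { scanningConfigurations, failures } = await client.send(
    new BatchGetRepositoryScanningConfigurationCommand({
      repositoryNames: ["my-app", "my-sidecar"], // placeholder repository names
    })
  );

  console.log(JSON.stringify(scanningConfigurations, null, 2));
  console.log(`failures: ${failures?.length ?? 0}`);
}

showScanningConfigurations().catch(console.error);
```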

                                                                      " + } + }, + "com.amazonaws.ecr#BatchGetRepositoryScanningConfigurationRequest": { + "type": "structure", + "members": { + "repositoryNames": { + "target": "com.amazonaws.ecr#ScanningConfigurationRepositoryNameList", + "traits": { + "smithy.api#documentation": "

                                                                      One or more repository names to get the scanning configuration for.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.ecr#BatchGetRepositoryScanningConfigurationResponse": { + "type": "structure", + "members": { + "scanningConfigurations": { + "target": "com.amazonaws.ecr#RepositoryScanningConfigurationList", + "traits": { + "smithy.api#documentation": "

                                                                      The scanning configuration for the requested repositories.

                                                                      " + } + }, + "failures": { + "target": "com.amazonaws.ecr#RepositoryScanningConfigurationFailureList", + "traits": { + "smithy.api#documentation": "

                                                                      Any failures associated with the call.

                                                                      " + } + } + } + }, "com.amazonaws.ecr#BatchedOperationLayerDigest": { "type": "string", "traits": { @@ -513,7 +651,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Informs Amazon ECR that the image layer upload has completed for a specified registry,\n repository name, and upload ID. You can optionally provide a sha256 digest\n of the image layer for data validation purposes.

                                                                      \n

                                                                      When an image is pushed, the CompleteLayerUpload API is called once per each new image\n layer to verify that the upload has completed.

                                                                      \n \n

                                                                      This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      Informs Amazon ECR that the image layer upload has completed for a specified registry,\n repository name, and upload ID. You can optionally provide a sha256 digest\n of the image layer for data validation purposes.

                                                                      \n

                                                                      When an image is pushed, the CompleteLayerUpload API is called once for each new image\n layer to verify that the upload has completed.

                                                                      \n \n

                                                                      This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                                                                      \n
                                                                      " } }, "com.amazonaws.ecr#CompleteLayerUploadRequest": { @@ -577,6 +715,92 @@ } } }, + "com.amazonaws.ecr#CreatePullThroughCacheRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#CreatePullThroughCacheRuleRequest" + }, + "output": { + "target": "com.amazonaws.ecr#CreatePullThroughCacheRuleResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#LimitExceededException" + }, + { + "target": "com.amazonaws.ecr#PullThroughCacheRuleAlreadyExistsException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#UnsupportedUpstreamRegistryException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a pull through cache rule. A pull through cache rule provides a way to cache\n images from an external public registry in your Amazon ECR private registry.

                                                                      " + } + }, + "com.amazonaws.ecr#CreatePullThroughCacheRuleRequest": { + "type": "structure", + "members": { + "ecrRepositoryPrefix": { + "target": "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefix", + "traits": { + "smithy.api#documentation": "

                                                                      The repository name prefix to use when caching images from the source registry.

                                                                      ", + "smithy.api#required": {} + } + }, + "upstreamRegistryUrl": { + "target": "com.amazonaws.ecr#Url", + "traits": { + "smithy.api#documentation": "

                                                                      The registry URL of the upstream public registry to use as the source for the pull\n through cache rule.

                                                                      ", + "smithy.api#required": {} + } + }, + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID associated with the registry to create the pull through cache\n rule for. If you do not specify a registry, the default registry is assumed.

                                                                      " + } + } + } + }, + "com.amazonaws.ecr#CreatePullThroughCacheRuleResponse": { + "type": "structure", + "members": { + "ecrRepositoryPrefix": { + "target": "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefix", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon ECR repository prefix associated with the pull through cache rule.

                                                                      " + } + }, + "upstreamRegistryUrl": { + "target": "com.amazonaws.ecr#Url", + "traits": { + "smithy.api#documentation": "

                                                                      The upstream registry URL associated with the pull through cache rule.

                                                                      " + } + }, + "createdAt": { + "target": "com.amazonaws.ecr#CreationTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time, in JavaScript date format, when the pull through cache rule was\n created.

                                                                      " + } + }, + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The registry ID associated with the request.

                                                                      " + } + } + } + }, "com.amazonaws.ecr#CreateRepository": { "type": "operation", "input": { @@ -618,7 +842,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

                                                                      The AWS account ID associated with the registry to create the repository.\n If you do not specify a registry, the default registry is assumed.

                                                                      " + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID associated with the registry to create the repository.\n If you do not specify a registry, the default registry is assumed.

                                                                      " } }, "repositoryName": { @@ -668,6 +892,111 @@ "com.amazonaws.ecr#CreationTimestamp": { "type": "timestamp" }, + "com.amazonaws.ecr#CvssScore": { + "type": "structure", + "members": { + "baseScore": { + "target": "com.amazonaws.ecr#BaseScore", + "traits": { + "smithy.api#documentation": "

                                                                      The base CVSS score used for the finding.

                                                                      " + } + }, + "scoringVector": { + "target": "com.amazonaws.ecr#ScoringVector", + "traits": { + "smithy.api#documentation": "

                                                                      The vector string of the CVSS score.

                                                                      " + } + }, + "source": { + "target": "com.amazonaws.ecr#Source", + "traits": { + "smithy.api#documentation": "

                                                                      The source of the CVSS score.

                                                                      " + } + }, + "version": { + "target": "com.amazonaws.ecr#Version", + "traits": { + "smithy.api#documentation": "

                                                                      The version of CVSS used for the score.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The CVSS score for a finding.

                                                                      " + } + }, + "com.amazonaws.ecr#CvssScoreAdjustment": { + "type": "structure", + "members": { + "metric": { + "target": "com.amazonaws.ecr#Metric", + "traits": { + "smithy.api#documentation": "

                                                                      The metric used to adjust the CVSS score.

                                                                      " + } + }, + "reason": { + "target": "com.amazonaws.ecr#Reason", + "traits": { + "smithy.api#documentation": "

                                                                      The reason the CVSS score was adjusted.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details on adjustments Amazon Inspector made to the CVSS score for a finding.

                                                                      " + } + }, + "com.amazonaws.ecr#CvssScoreAdjustmentList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#CvssScoreAdjustment" + } + }, + "com.amazonaws.ecr#CvssScoreDetails": { + "type": "structure", + "members": { + "adjustments": { + "target": "com.amazonaws.ecr#CvssScoreAdjustmentList", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about the adjustments Amazon Inspector made to the CVSS score.

                                                                      " + } + }, + "score": { + "target": "com.amazonaws.ecr#Score", + "traits": { + "smithy.api#documentation": "

                                                                      The CVSS score.

                                                                      " + } + }, + "scoreSource": { + "target": "com.amazonaws.ecr#Source", + "traits": { + "smithy.api#documentation": "

                                                                      The source for the CVSS score.

                                                                      " + } + }, + "scoringVector": { + "target": "com.amazonaws.ecr#ScoringVector", + "traits": { + "smithy.api#documentation": "

                                                                      The vector for the CVSS score.

                                                                      " + } + }, + "version": { + "target": "com.amazonaws.ecr#Version", + "traits": { + "smithy.api#documentation": "

                                                                      The CVSS version used in scoring.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about the CVSS score.

                                                                      " + } + }, + "com.amazonaws.ecr#CvssScoreList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#CvssScore" + } + }, + "com.amazonaws.ecr#Date": { + "type": "timestamp" + }, "com.amazonaws.ecr#DeleteLifecyclePolicy": { "type": "operation", "input": { @@ -741,6 +1070,79 @@ } } }, + "com.amazonaws.ecr#DeletePullThroughCacheRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#DeletePullThroughCacheRuleRequest" + }, + "output": { + "target": "com.amazonaws.ecr#DeletePullThroughCacheRuleResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#PullThroughCacheRuleNotFoundException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a pull through cache rule.

                                                                      " + } + }, + "com.amazonaws.ecr#DeletePullThroughCacheRuleRequest": { + "type": "structure", + "members": { + "ecrRepositoryPrefix": { + "target": "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefix", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon ECR repository prefix associated with the pull through cache rule to\n delete.

                                                                      ", + "smithy.api#required": {} + } + }, + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID associated with the registry that contains the pull through cache\n rule. If you do not specify a registry, the default registry is assumed.

                                                                      " + } + } + } + }, + "com.amazonaws.ecr#DeletePullThroughCacheRuleResponse": { + "type": "structure", + "members": { + "ecrRepositoryPrefix": { + "target": "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefix", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon ECR repository prefix associated with the request.

                                                                      " + } + }, + "upstreamRegistryUrl": { + "target": "com.amazonaws.ecr#Url", + "traits": { + "smithy.api#documentation": "

                                                                      The upstream registry URL associated with the pull through cache rule.

                                                                      " + } + }, + "createdAt": { + "target": "com.amazonaws.ecr#CreationTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The timestamp associated with the pull through cache rule.

                                                                      " + } + }, + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The registry ID associated with the request.

                                                                      " + } + } + } + }, "com.amazonaws.ecr#DeleteRegistryPolicy": { "type": "operation", "input": { @@ -1015,6 +1417,9 @@ }, { "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" } ], "traits": { @@ -1212,24 +1617,101 @@ "filter": { "target": "com.amazonaws.ecr#DescribeImagesFilter", "traits": { - "smithy.api#documentation": "

                                                                      The filter key and value with which to filter your DescribeImages\n results.

                                                                      " + "smithy.api#documentation": "

                                                                      The filter key and value with which to filter your DescribeImages\n results.

                                                                      " + } + } + } + }, + "com.amazonaws.ecr#DescribeImagesResponse": { + "type": "structure", + "members": { + "imageDetails": { + "target": "com.amazonaws.ecr#ImageDetailList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of ImageDetail objects that contain data about the\n image.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.ecr#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The nextToken value to include in a future DescribeImages\n request. When the results of a DescribeImages request exceed\n maxResults, this value can be used to retrieve the next page of\n results. This value is null when there are no more results to\n return.

                                                                      " + } + } + } + }, + "com.amazonaws.ecr#DescribePullThroughCacheRules": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#DescribePullThroughCacheRulesRequest" + }, + "output": { + "target": "com.amazonaws.ecr#DescribePullThroughCacheRulesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#PullThroughCacheRuleNotFoundException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns the pull through cache rules for a registry.

                                                                      ", + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.ecr#DescribePullThroughCacheRulesRequest": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID associated with the registry to return the pull through cache\n rules for. If you do not specify a registry, the default registry is assumed.

                                                                      " + } + }, + "ecrRepositoryPrefixes": { + "target": "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefixList", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon ECR repository prefixes associated with the pull through cache rules to return.\n If no repository prefix value is specified, all pull through cache rules are\n returned.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.ecr#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The nextToken value returned from a previous paginated\n DescribePullThroughCacheRulesRequest request where\n maxResults was used and the results exceeded the value of that\n parameter. Pagination continues from the end of the previous results that returned the\n nextToken value. This value is null when there are no more results to\n return.

                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.ecr#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of pull through cache rules returned by\n DescribePullThroughCacheRulesRequest in paginated output. When this\n parameter is used, DescribePullThroughCacheRulesRequest only returns\n maxResults results in a single page along with a nextToken\n response element. The remaining results of the initial request can be seen by sending\n another DescribePullThroughCacheRulesRequest request with the returned\n nextToken value. This value can be between 1 and 1000. If this\n parameter is not used, then DescribePullThroughCacheRulesRequest returns up\n to 100 results and a nextToken value, if applicable.

                                                                      " } } } }, - "com.amazonaws.ecr#DescribeImagesResponse": { + "com.amazonaws.ecr#DescribePullThroughCacheRulesResponse": { "type": "structure", "members": { - "imageDetails": { - "target": "com.amazonaws.ecr#ImageDetailList", + "pullThroughCacheRules": { + "target": "com.amazonaws.ecr#PullThroughCacheRuleList", "traits": { - "smithy.api#documentation": "

                                                                      A list of ImageDetail objects that contain data about the\n image.

                                                                      " + "smithy.api#documentation": "

                                                                      The details of the pull through cache rules.

                                                                      " } }, "nextToken": { "target": "com.amazonaws.ecr#NextToken", "traits": { - "smithy.api#documentation": "

                                                                      The nextToken value to include in a future DescribeImages\n request. When the results of a DescribeImages request exceed\n maxResults, this value can be used to retrieve the next page of\n results. This value is null when there are no more results to\n return.

                                                                      " + "smithy.api#documentation": "

                                                                      The nextToken value to include in a future\n DescribePullThroughCacheRulesRequest request. When the results of a\n DescribePullThroughCacheRulesRequest request exceed\n maxResults, this value can be used to retrieve the next page of\n results. This value is null when there are no more results to return.

                                                                      " } } } @@ -1374,7 +1856,7 @@ "encryptionType": { "target": "com.amazonaws.ecr#EncryptionType", "traits": { - "smithy.api#documentation": "

                                                                      The encryption type to use.

                                                                      \n

                                                                      If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with an KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide..

                                                                      \n

                                                                      If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES-256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide..

                                                                      ", + "smithy.api#documentation": "

                                                                      The encryption type to use.

                                                                      \n

                                                                      If you use the KMS encryption type, the contents of the repository will\n be encrypted using server-side encryption with a Key Management Service key stored in KMS. When you\n use KMS to encrypt your data, you can either use the default Amazon Web Services managed KMS key\n for Amazon ECR, or specify your own KMS key, which you already created. For more\n information, see Protecting data using server-side\n encryption with a KMS key stored in Key Management Service (SSE-KMS) in the\n Amazon Simple Storage Service Console Developer Guide.

                                                                      \n

                                                                      If you use the AES256 encryption type, Amazon ECR uses server-side encryption\n with Amazon S3-managed encryption keys which encrypts the images in the repository using an\n AES-256 encryption algorithm. For more information, see Protecting data using\n server-side encryption with Amazon S3-managed encryption keys (SSE-S3) in the\n Amazon Simple Storage Service Console Developer Guide.

                                                                      ", "smithy.api#required": {} } }, @@ -1404,6 +1886,116 @@ ] } }, + "com.amazonaws.ecr#EnhancedImageScanFinding": { + "type": "structure", + "members": { + "awsAccountId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID associated with the image.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.ecr#FindingDescription", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the finding.

                                                                      " + } + }, + "findingArn": { + "target": "com.amazonaws.ecr#FindingArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the finding.

                                                                      " + } + }, + "firstObservedAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the finding was first observed.

                                                                      " + } + }, + "lastObservedAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the finding was last observed.

                                                                      " + } + }, + "packageVulnerabilityDetails": { + "target": "com.amazonaws.ecr#PackageVulnerabilityDetails", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains the details of a package vulnerability finding.

                                                                      " + } + }, + "remediation": { + "target": "com.amazonaws.ecr#Remediation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains the details about how to remediate a finding.

                                                                      " + } + }, + "resources": { + "target": "com.amazonaws.ecr#ResourceList", + "traits": { + "smithy.api#documentation": "

                                                                      Contains information on the resources involved in a finding.

                                                                      " + } + }, + "score": { + "target": "com.amazonaws.ecr#Score", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Inspector score given to the finding.

                                                                      " + } + }, + "scoreDetails": { + "target": "com.amazonaws.ecr#ScoreDetails", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details of the Amazon Inspector score.

                                                                      " + } + }, + "severity": { + "target": "com.amazonaws.ecr#Severity", + "traits": { + "smithy.api#documentation": "

                                                                      The severity of the finding.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.ecr#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the finding.

                                                                      " + } + }, + "title": { + "target": "com.amazonaws.ecr#Title", + "traits": { + "smithy.api#documentation": "

                                                                      The title of the finding.

                                                                      " + } + }, + "type": { + "target": "com.amazonaws.ecr#Type", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the finding.

                                                                      " + } + }, + "updatedAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time the finding was last updated.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details of an enhanced image scan. This is returned when enhanced scanning is\n enabled for your private registry.

                                                                      " + } + }, + "com.amazonaws.ecr#EnhancedImageScanFindingList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#EnhancedImageScanFinding" + } + }, + "com.amazonaws.ecr#Epoch": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, "com.amazonaws.ecr#EvaluationTimestamp": { "type": "timestamp" }, @@ -1413,6 +2005,12 @@ "com.amazonaws.ecr#ExpirationTimestamp": { "type": "timestamp" }, + "com.amazonaws.ecr#FilePath": { + "type": "string" + }, + "com.amazonaws.ecr#FindingArn": { + "type": "string" + }, "com.amazonaws.ecr#FindingDescription": { "type": "string" }, @@ -1859,6 +2457,50 @@ } } }, + "com.amazonaws.ecr#GetRegistryScanningConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#GetRegistryScanningConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.ecr#GetRegistryScanningConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves the scanning configuration for a registry.

                                                                      " + } + }, + "com.amazonaws.ecr#GetRegistryScanningConfigurationRequest": { + "type": "structure", + "members": {} + }, + "com.amazonaws.ecr#GetRegistryScanningConfigurationResponse": { + "type": "structure", + "members": { + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the registry.

                                                                      " + } + }, + "scanningConfiguration": { + "target": "com.amazonaws.ecr#RegistryScanningConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      The scanning configuration for the registry.

                                                                      " + } + } + } + }, "com.amazonaws.ecr#GetRepositoryPolicy": { "type": "operation", "input": { @@ -2229,7 +2871,7 @@ "registryId": { "target": "com.amazonaws.ecr#RegistryId", "traits": { - "smithy.api#documentation": "

                                                                      The AWS account ID associated with the registry to which the image belongs.

                                                                      " + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID associated with the registry to which the image belongs.

                                                                      " } }, "status": { @@ -2314,16 +2956,22 @@ "smithy.api#documentation": "

                                                                      The time when the vulnerability data was last scanned.

                                                                      " } }, + "findingSeverityCounts": { + "target": "com.amazonaws.ecr#FindingSeverityCounts", + "traits": { + "smithy.api#documentation": "

                                                                      The image vulnerability counts, sorted by severity.

                                                                      " + } + }, "findings": { "target": "com.amazonaws.ecr#ImageScanFindingList", "traits": { "smithy.api#documentation": "

                                                                      The findings from the image scan.

                                                                      " } }, - "findingSeverityCounts": { - "target": "com.amazonaws.ecr#FindingSeverityCounts", + "enhancedFindings": { + "target": "com.amazonaws.ecr#EnhancedImageScanFindingList", "traits": { - "smithy.api#documentation": "

                                                                      The image vulnerability counts, sorted by severity.

                                                                      " + "smithy.api#documentation": "

                                                                      Details about the enhanced scan findings from Amazon Inspector.

                                                                      " } } }, @@ -2439,6 +3087,12 @@ ] } }, + "com.amazonaws.ecr#ImageTagsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#ImageTag" + } + }, "com.amazonaws.ecr#InitiateLayerUpload": { "type": "operation", "input": { @@ -2462,7 +3116,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Notifies Amazon ECR that you intend to upload an image layer.

                                                                      \n

                                                                      When an image is pushed, the InitiateLayerUpload API is called once per image layer\n that has not already been uploaded. Whether or not an image layer has been uploaded is\n determined by the BatchCheckLayerAvailability API action.

                                                                      \n \n

                                                                      This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      Notifies Amazon ECR that you intend to upload an image layer.

                                                                      \n

                                                                      When an image is pushed, the InitiateLayerUpload API is called once per image layer\n that has not already been uploaded. Whether or not an image layer has been uploaded is\n determined by the BatchCheckLayerAvailability API action.

                                                                      \n \n

                                                                      This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                                                                      \n
                                                                      " } }, "com.amazonaws.ecr#InitiateLayerUploadRequest": { @@ -3114,46 +3768,222 @@ "smithy.api#documentation": "

                                                                      List the tags for an Amazon ECR resource.

                                                                      " } }, - "com.amazonaws.ecr#ListTagsForResourceRequest": { + "com.amazonaws.ecr#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.ecr#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the\n only supported resource is an Amazon ECR repository.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.ecr#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.ecr#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      The tags for the resource.

                                                                      " + } + } + } + }, + "com.amazonaws.ecr#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.ecr#MediaType": { + "type": "string" + }, + "com.amazonaws.ecr#MediaTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#MediaType" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.ecr#Metric": { + "type": "string" + }, + "com.amazonaws.ecr#NextToken": { + "type": "string" + }, + "com.amazonaws.ecr#PackageManager": { + "type": "string" + }, + "com.amazonaws.ecr#PackageVulnerabilityDetails": { + "type": "structure", + "members": { + "cvss": { + "target": "com.amazonaws.ecr#CvssScoreList", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about the CVSS score of a finding.

                                                                      " + } + }, + "referenceUrls": { + "target": "com.amazonaws.ecr#ReferenceUrlsList", + "traits": { + "smithy.api#documentation": "

                                                                      One or more URLs that contain details about this vulnerability type.

                                                                      " + } + }, + "relatedVulnerabilities": { + "target": "com.amazonaws.ecr#RelatedVulnerabilitiesList", + "traits": { + "smithy.api#documentation": "

                                                                      One or more vulnerabilities related to the one identified in this finding.

                                                                      " + } + }, + "source": { + "target": "com.amazonaws.ecr#Source", + "traits": { + "smithy.api#documentation": "

                                                                      The source of the vulnerability information.

                                                                      " + } + }, + "sourceUrl": { + "target": "com.amazonaws.ecr#Url", + "traits": { + "smithy.api#documentation": "

                                                                      A URL to the source of the vulnerability information.

                                                                      " + } + }, + "vendorCreatedAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that this vulnerability was first added to the vendor's\n database.

                                                                      " + } + }, + "vendorSeverity": { + "target": "com.amazonaws.ecr#Severity", + "traits": { + "smithy.api#documentation": "

                                                                      The severity the vendor has given to this vulnerability type.

                                                                      " + } + }, + "vendorUpdatedAt": { + "target": "com.amazonaws.ecr#Date", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time the vendor last updated this vulnerability in their database.

                                                                      " + } + }, + "vulnerabilityId": { + "target": "com.amazonaws.ecr#VulnerabilityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID given to this vulnerability.

                                                                      " + } + }, + "vulnerablePackages": { + "target": "com.amazonaws.ecr#VulnerablePackagesList", + "traits": { + "smithy.api#documentation": "

                                                                      The packages impacted by this vulnerability.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about a package vulnerability finding.

                                                                      " + } + }, + "com.amazonaws.ecr#PartSize": { + "type": "long", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0 + } + } + }, + "com.amazonaws.ecr#Platform": { + "type": "string" + }, + "com.amazonaws.ecr#ProxyEndpoint": { + "type": "string" + }, + "com.amazonaws.ecr#PullThroughCacheRule": { + "type": "structure", + "members": { + "ecrRepositoryPrefix": { + "target": "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefix", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon ECR repository prefix associated with the pull through cache rule.

                                                                      " + } + }, + "upstreamRegistryUrl": { + "target": "com.amazonaws.ecr#Url", + "traits": { + "smithy.api#documentation": "

                                                                      The upstream registry URL associated with the pull through cache rule.

                                                                      " + } + }, + "createdAt": { + "target": "com.amazonaws.ecr#CreationTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time the pull through cache rule was created.

                                                                      " + } + }, + "registryId": { + "target": "com.amazonaws.ecr#RegistryId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID associated with the registry the pull through cache rule is\n associated with.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details of a pull through cache rule.

                                                                      " + } + }, + "com.amazonaws.ecr#PullThroughCacheRuleAlreadyExistsException": { "type": "structure", "members": { - "resourceArn": { - "target": "com.amazonaws.ecr#Arn", - "traits": { - "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that identifies the resource for which to list the tags. Currently, the\n only supported resource is an Amazon ECR repository.

                                                                      ", - "smithy.api#required": {} - } + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage" } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A pull through cache rule with these settings already exists for the private\n registry.

                                                                      ", + "smithy.api#error": "client" } }, - "com.amazonaws.ecr#ListTagsForResourceResponse": { + "com.amazonaws.ecr#PullThroughCacheRuleList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#PullThroughCacheRule" + } + }, + "com.amazonaws.ecr#PullThroughCacheRuleNotFoundException": { "type": "structure", "members": { - "tags": { - "target": "com.amazonaws.ecr#TagList", - "traits": { - "smithy.api#documentation": "

                                                                      The tags for the resource.

                                                                      " - } + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage" } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The pull through cache rule was not found. Specify a valid pull through cache rule and\n try again.

                                                                      ", + "smithy.api#error": "client" } }, - "com.amazonaws.ecr#MaxResults": { - "type": "integer", + "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefix": { + "type": "string", "traits": { - "smithy.api#box": {}, - "smithy.api#range": { - "min": 1, - "max": 1000 - } + "smithy.api#length": { + "min": 2, + "max": 20 + }, + "smithy.api#pattern": "^[a-z0-9]+(?:[._-][a-z0-9]+)*$" } }, - "com.amazonaws.ecr#MediaType": { - "type": "string" - }, - "com.amazonaws.ecr#MediaTypeList": { + "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefixList": { "type": "list", "member": { - "target": "com.amazonaws.ecr#MediaType" + "target": "com.amazonaws.ecr#PullThroughCacheRuleRepositoryPrefix" }, "traits": { "smithy.api#length": { @@ -3162,21 +3992,6 @@ } } }, - "com.amazonaws.ecr#NextToken": { - "type": "string" - }, - "com.amazonaws.ecr#PartSize": { - "type": "long", - "traits": { - "smithy.api#box": {}, - "smithy.api#range": { - "min": 0 - } - } - }, - "com.amazonaws.ecr#ProxyEndpoint": { - "type": "string" - }, "com.amazonaws.ecr#PushTimestamp": { "type": "timestamp" }, @@ -3295,6 +4110,9 @@ }, { "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" } ], "traits": { @@ -3543,6 +4361,57 @@ } } }, + "com.amazonaws.ecr#PutRegistryScanningConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.ecr#PutRegistryScanningConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.ecr#PutRegistryScanningConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.ecr#InvalidParameterException" + }, + { + "target": "com.amazonaws.ecr#ServerException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates or updates the scanning configuration for your private registry.

                                                                      " + } + }, + "com.amazonaws.ecr#PutRegistryScanningConfigurationRequest": { + "type": "structure", + "members": { + "scanType": { + "target": "com.amazonaws.ecr#ScanType", + "traits": { + "smithy.api#documentation": "

                                                                      The scanning type to set for the registry.

                                                                      \n

                                                                      By default, the BASIC scan type is used. When basic scanning is set, you\n may specify filters to determine which individual repositories, or all repositories, are\n scanned when new images are pushed. Alternatively, you can do manual scans of images\n with basic scanning.

                                                                      \n

                                                                      When the ENHANCED scan type is set, Amazon Inspector provides automated, continuous\n scanning of all repositories in your registry.

                                                                      " + } + }, + "rules": { + "target": "com.amazonaws.ecr#RegistryScanningRuleList", + "traits": { + "smithy.api#documentation": "

                                                                      The scanning rules to use for the registry. A scanning rule is used to determine which\n repository filters are used and at what frequency scanning will occur.

                                                                      " + } + } + } + }, + "com.amazonaws.ecr#PutRegistryScanningConfigurationResponse": { + "type": "structure", + "members": { + "registryScanningConfiguration": { + "target": "com.amazonaws.ecr#RegistryScanningConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      The scanning configuration for your registry.

                                                                      " + } + } + } + }, "com.amazonaws.ecr#PutReplicationConfiguration": { "type": "operation", "input": { @@ -3589,6 +4458,38 @@ } } }, + "com.amazonaws.ecr#Reason": { + "type": "string" + }, + "com.amazonaws.ecr#Recommendation": { + "type": "structure", + "members": { + "url": { + "target": "com.amazonaws.ecr#Url", + "traits": { + "smithy.api#documentation": "

                                                                      The URL address to the CVE remediation recommendations.

                                                                      " + } + }, + "text": { + "target": "com.amazonaws.ecr#RecommendationText", + "traits": { + "smithy.api#documentation": "

                                                                      The recommended course of action to remediate the finding.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details about the recommended course of action to remediate the finding.

                                                                      " + } + }, + "com.amazonaws.ecr#RecommendationText": { + "type": "string" + }, + "com.amazonaws.ecr#ReferenceUrlsList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#Url" + } + }, "com.amazonaws.ecr#ReferencedImagesNotFoundException": { "type": "structure", "members": { @@ -3638,6 +4539,86 @@ } } }, + "com.amazonaws.ecr#RegistryScanningConfiguration": { + "type": "structure", + "members": { + "scanType": { + "target": "com.amazonaws.ecr#ScanType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of scanning configured for the registry.

                                                                      " + } + }, + "rules": { + "target": "com.amazonaws.ecr#RegistryScanningRuleList", + "traits": { + "smithy.api#documentation": "

                                                                      The scanning rules associated with the registry.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The scanning configuration for a private registry.

                                                                      " + } + }, + "com.amazonaws.ecr#RegistryScanningRule": { + "type": "structure", + "members": { + "scanFrequency": { + "target": "com.amazonaws.ecr#ScanFrequency", + "traits": { + "smithy.api#documentation": "

                                                                      The frequency at which scans are performed for a private registry.

                                                                      ", + "smithy.api#required": {} + } + }, + "repositoryFilters": { + "target": "com.amazonaws.ecr#ScanningRepositoryFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The repository filters associated with the scanning configuration for a private\n registry.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details of a scanning rule for a private registry.

                                                                      " + } + }, + "com.amazonaws.ecr#RegistryScanningRuleList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RegistryScanningRule" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2 + } + } + }, + "com.amazonaws.ecr#RelatedVulnerabilitiesList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RelatedVulnerability" + } + }, + "com.amazonaws.ecr#RelatedVulnerability": { + "type": "string" + }, + "com.amazonaws.ecr#Release": { + "type": "string" + }, + "com.amazonaws.ecr#Remediation": { + "type": "structure", + "members": { + "recommendation": { + "target": "com.amazonaws.ecr#Recommendation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains information about the recommended course of action to\n remediate the finding.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information on how to remediate a finding.

                                                                      " + } + }, "com.amazonaws.ecr#ReplicationConfiguration": { "type": "structure", "members": { @@ -3878,73 +4859,223 @@ "min": 2, "max": 256 }, - "smithy.api#pattern": "^(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*$" - } - }, - "com.amazonaws.ecr#RepositoryNameList": { - "type": "list", - "member": { - "target": "com.amazonaws.ecr#RepositoryName" + "smithy.api#pattern": "^(?:[a-z0-9]+(?:[._-][a-z0-9]+)*/)*[a-z0-9]+(?:[._-][a-z0-9]+)*$" + } + }, + "com.amazonaws.ecr#RepositoryNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RepositoryName" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.ecr#RepositoryNotEmptyException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage", + "traits": { + "smithy.api#documentation": "

                                                                      The error message associated with the exception.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The specified repository contains images. To delete a repository that contains images,\n you must force the deletion with the force parameter.

                                                                      ", + "smithy.api#error": "client" + } + }, + "com.amazonaws.ecr#RepositoryNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage", + "traits": { + "smithy.api#documentation": "

                                                                      The error message associated with the exception.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The specified repository could not be found. Check the spelling of the specified\n repository and ensure that you are performing operations on the correct registry.

                                                                      ", + "smithy.api#error": "client" + } + }, + "com.amazonaws.ecr#RepositoryPolicyNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage", + "traits": { + "smithy.api#documentation": "

                                                                      The error message associated with the exception.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The specified repository and registry combination does not have an associated\n repository policy.

                                                                      ", + "smithy.api#error": "client" + } + }, + "com.amazonaws.ecr#RepositoryPolicyText": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 10240 + } + } + }, + "com.amazonaws.ecr#RepositoryScanningConfiguration": { + "type": "structure", + "members": { + "repositoryArn": { + "target": "com.amazonaws.ecr#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the repository.

                                                                      " + } + }, + "repositoryName": { + "target": "com.amazonaws.ecr#RepositoryName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the repository.

                                                                      " + } + }, + "scanOnPush": { + "target": "com.amazonaws.ecr#ScanOnPushFlag", + "traits": { + "smithy.api#documentation": "

                                                                      Whether or not scan on push is configured for the repository.

                                                                      " + } + }, + "scanFrequency": { + "target": "com.amazonaws.ecr#ScanFrequency", + "traits": { + "smithy.api#documentation": "

                                                                      The scan frequency for the repository.

                                                                      " + } + }, + "appliedScanFilters": { + "target": "com.amazonaws.ecr#ScanningRepositoryFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The scan filters applied to the repository.

                                                                      " + } + } }, "traits": { - "smithy.api#length": { - "min": 1, - "max": 100 - } + "smithy.api#documentation": "

                                                                      The details of the scanning configuration for a repository.

                                                                      " } }, - "com.amazonaws.ecr#RepositoryNotEmptyException": { + "com.amazonaws.ecr#RepositoryScanningConfigurationFailure": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.ecr#ExceptionMessage", + "repositoryName": { + "target": "com.amazonaws.ecr#RepositoryName", "traits": { - "smithy.api#documentation": "

                                                                      The error message associated with the exception.

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the repository.

                                                                      " + } + }, + "failureCode": { + "target": "com.amazonaws.ecr#ScanningConfigurationFailureCode", + "traits": { + "smithy.api#documentation": "

                                                                      The failure code.

                                                                      " + } + }, + "failureReason": { + "target": "com.amazonaws.ecr#ScanningConfigurationFailureReason", + "traits": { + "smithy.api#documentation": "

                                                                      The reason for the failure.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      The specified repository contains images. To delete a repository that contains images,\n you must force the deletion with the force parameter.

                                                                      ", - "smithy.api#error": "client" + "smithy.api#documentation": "

                                                                      The details about any failures associated with the scanning configuration of a\n repository.

                                                                      " } }, - "com.amazonaws.ecr#RepositoryNotFoundException": { + "com.amazonaws.ecr#RepositoryScanningConfigurationFailureList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RepositoryScanningConfigurationFailure" + } + }, + "com.amazonaws.ecr#RepositoryScanningConfigurationList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RepositoryScanningConfiguration" + } + }, + "com.amazonaws.ecr#Resource": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.ecr#ExceptionMessage", + "details": { + "target": "com.amazonaws.ecr#ResourceDetails", "traits": { - "smithy.api#documentation": "

                                                                      The error message associated with the exception.

                                                                      " + "smithy.api#documentation": "

                                                                      An object that contains details about the resource involved in a finding.

                                                                      " + } + }, + "id": { + "target": "com.amazonaws.ecr#ResourceId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the resource.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.ecr#Tags", + "traits": { + "smithy.api#documentation": "

                                                                      The tags attached to the resource.

                                                                      " + } + }, + "type": { + "target": "com.amazonaws.ecr#Type", + "traits": { + "smithy.api#documentation": "

                                                                      The type of resource.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      The specified repository could not be found. Check the spelling of the specified\n repository and ensure that you are performing operations on the correct registry.

                                                                      ", - "smithy.api#error": "client" + "smithy.api#documentation": "

                                                                      Details about the resource involved in a finding.

                                                                      " } }, - "com.amazonaws.ecr#RepositoryPolicyNotFoundException": { + "com.amazonaws.ecr#ResourceDetails": { "type": "structure", "members": { - "message": { - "target": "com.amazonaws.ecr#ExceptionMessage", + "awsEcrContainerImage": { + "target": "com.amazonaws.ecr#AwsEcrContainerImageDetails", "traits": { - "smithy.api#documentation": "

                                                                      The error message associated with the exception.

                                                                      " + "smithy.api#documentation": "

                                                                      An object that contains details about the Amazon ECR container image involved in the\n finding.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      The specified repository and registry combination does not have an associated\n repository policy.

                                                                      ", - "smithy.api#error": "client" + "smithy.api#documentation": "

                                                                      Contains details about the resource involved in the finding.

                                                                      " } }, - "com.amazonaws.ecr#RepositoryPolicyText": { + "com.amazonaws.ecr#ResourceId": { + "type": "string" + }, + "com.amazonaws.ecr#ResourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#Resource" + } + }, + "com.amazonaws.ecr#ScanFrequency": { "type": "string", "traits": { - "smithy.api#length": { - "min": 0, - "max": 10240 - } + "smithy.api#enum": [ + { + "value": "SCAN_ON_PUSH", + "name": "SCAN_ON_PUSH" + }, + { + "value": "CONTINUOUS_SCAN", + "name": "CONTINUOUS_SCAN" + }, + { + "value": "MANUAL", + "name": "MANUAL" + } + ] } }, "com.amazonaws.ecr#ScanNotFoundException": { @@ -3977,6 +5108,26 @@ { "value": "FAILED", "name": "FAILED" + }, + { + "value": "UNSUPPORTED_IMAGE", + "name": "UNSUPPORTED_IMAGE" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "SCAN_ELIGIBILITY_EXPIRED", + "name": "SCAN_ELIGIBILITY_EXPIRED" + }, + { + "value": "FINDINGS_UNAVAILABLE", + "name": "FINDINGS_UNAVAILABLE" } ] } @@ -3987,6 +5138,122 @@ "com.amazonaws.ecr#ScanTimestamp": { "type": "timestamp" }, + "com.amazonaws.ecr#ScanType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BASIC", + "name": "BASIC" + }, + { + "value": "ENHANCED", + "name": "ENHANCED" + } + ] + } + }, + "com.amazonaws.ecr#ScanningConfigurationFailureCode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "REPOSITORY_NOT_FOUND", + "name": "REPOSITORY_NOT_FOUND" + } + ] + } + }, + "com.amazonaws.ecr#ScanningConfigurationFailureReason": { + "type": "string" + }, + "com.amazonaws.ecr#ScanningConfigurationRepositoryNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#RepositoryName" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 25 + } + } + }, + "com.amazonaws.ecr#ScanningRepositoryFilter": { + "type": "structure", + "members": { + "filter": { + "target": "com.amazonaws.ecr#ScanningRepositoryFilterValue", + "traits": { + "smithy.api#documentation": "

                                                                      The filter to use when scanning.

                                                                      ", + "smithy.api#required": {} + } + }, + "filterType": { + "target": "com.amazonaws.ecr#ScanningRepositoryFilterType", + "traits": { + "smithy.api#documentation": "

                                                                      The type associated with the filter.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details of a scanning repository filter.

                                                                      " + } + }, + "com.amazonaws.ecr#ScanningRepositoryFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#ScanningRepositoryFilter" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.ecr#ScanningRepositoryFilterType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "WILDCARD", + "name": "WILDCARD" + } + ] + } + }, + "com.amazonaws.ecr#ScanningRepositoryFilterValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[a-z0-9*](?:[._\\-/a-z0-9*]?[a-z0-9*]+)*$" + } + }, + "com.amazonaws.ecr#Score": { + "type": "double" + }, + "com.amazonaws.ecr#ScoreDetails": { + "type": "structure", + "members": { + "cvss": { + "target": "com.amazonaws.ecr#CvssScoreDetails", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about the CVSS score given to a finding.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about the Amazon Inspector score given to a finding.

                                                                      " + } + }, + "com.amazonaws.ecr#ScoringVector": { + "type": "string" + }, "com.amazonaws.ecr#ServerException": { "type": "structure", "members": { @@ -4079,6 +5346,9 @@ } } }, + "com.amazonaws.ecr#Severity": { + "type": "string" + }, "com.amazonaws.ecr#SeverityCount": { "type": "integer", "traits": { @@ -4088,6 +5358,12 @@ } } }, + "com.amazonaws.ecr#Source": { + "type": "string" + }, + "com.amazonaws.ecr#SourceLayerHash": { + "type": "string" + }, "com.amazonaws.ecr#StartImageScan": { "type": "operation", "input": { @@ -4114,6 +5390,9 @@ }, { "target": "com.amazonaws.ecr#UnsupportedImageTypeException" + }, + { + "target": "com.amazonaws.ecr#ValidationException" } ], "traits": { @@ -4252,6 +5531,9 @@ } } }, + "com.amazonaws.ecr#Status": { + "type": "string" + }, "com.amazonaws.ecr#Tag": { "type": "structure", "members": { @@ -4361,6 +5643,18 @@ "com.amazonaws.ecr#TagValue": { "type": "string" }, + "com.amazonaws.ecr#Tags": { + "type": "map", + "key": { + "target": "com.amazonaws.ecr#TagKey" + }, + "value": { + "target": "com.amazonaws.ecr#TagValue" + } + }, + "com.amazonaws.ecr#Title": { + "type": "string" + }, "com.amazonaws.ecr#TooManyTagsException": { "type": "structure", "members": { @@ -4373,6 +5667,9 @@ "smithy.api#error": "client" } }, + "com.amazonaws.ecr#Type": { + "type": "string" + }, "com.amazonaws.ecr#UnsupportedImageTypeException": { "type": "structure", "members": { @@ -4385,6 +5682,18 @@ "smithy.api#error": "client" } }, + "com.amazonaws.ecr#UnsupportedUpstreamRegistryException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.ecr#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The specified upstream registry isn't supported.

                                                                      ", + "smithy.api#error": "client" + } + }, "com.amazonaws.ecr#UntagResource": { "type": "operation", "input": { @@ -4475,7 +5784,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Uploads an image layer part to Amazon ECR.

                                                                      \n

                                                                      When an image is pushed, each new image layer is uploaded in parts. The maximum size\n of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API\n is called once per each new image layer part.

                                                                      \n \n

                                                                      This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      Uploads an image layer part to Amazon ECR.

                                                                      \n

When an image is pushed, each new image layer is uploaded in parts. The maximum size\n of each image layer part can be 20971520 bytes (or about 20MB). The UploadLayerPart API\n is called once for each new image layer part.

                                                                      \n \n

                                                                      This operation is used by the Amazon ECR proxy and is not generally used by\n customers for pulling and pushing images. In most cases, you should use the docker CLI to pull, tag, and push images.

                                                                      \n
                                                                      " } }, "com.amazonaws.ecr#UploadLayerPartRequest": { @@ -4584,8 +5893,79 @@ "smithy.api#httpError": 400 } }, + "com.amazonaws.ecr#Version": { + "type": "string" + }, + "com.amazonaws.ecr#VulnerabilityId": { + "type": "string" + }, "com.amazonaws.ecr#VulnerabilitySourceUpdateTimestamp": { "type": "timestamp" + }, + "com.amazonaws.ecr#VulnerablePackage": { + "type": "structure", + "members": { + "arch": { + "target": "com.amazonaws.ecr#Arch", + "traits": { + "smithy.api#documentation": "

                                                                      The architecture of the vulnerable package.

                                                                      " + } + }, + "epoch": { + "target": "com.amazonaws.ecr#Epoch", + "traits": { + "smithy.api#documentation": "

                                                                      The epoch of the vulnerable package.

                                                                      " + } + }, + "filePath": { + "target": "com.amazonaws.ecr#FilePath", + "traits": { + "smithy.api#documentation": "

                                                                      The file path of the vulnerable package.

                                                                      " + } + }, + "name": { + "target": "com.amazonaws.ecr#VulnerablePackageName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the vulnerable package.

                                                                      " + } + }, + "packageManager": { + "target": "com.amazonaws.ecr#PackageManager", + "traits": { + "smithy.api#documentation": "

                                                                      The package manager of the vulnerable package.

                                                                      " + } + }, + "release": { + "target": "com.amazonaws.ecr#Release", + "traits": { + "smithy.api#documentation": "

                                                                      The release of the vulnerable package.

                                                                      " + } + }, + "sourceLayerHash": { + "target": "com.amazonaws.ecr#SourceLayerHash", + "traits": { + "smithy.api#documentation": "

                                                                      The source layer hash of the vulnerable package.

                                                                      " + } + }, + "version": { + "target": "com.amazonaws.ecr#Version", + "traits": { + "smithy.api#documentation": "

                                                                      The version of the vulnerable package.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information on the vulnerable package identified by a finding.

                                                                      " + } + }, + "com.amazonaws.ecr#VulnerablePackageName": { + "type": "string" + }, + "com.amazonaws.ecr#VulnerablePackagesList": { + "type": "list", + "member": { + "target": "com.amazonaws.ecr#VulnerablePackage" + } } } } diff --git a/codegen/sdk-codegen/aws-models/evidently.json b/codegen/sdk-codegen/aws-models/evidently.json new file mode 100644 index 000000000000..f2281c577e6e --- /dev/null +++ b/codegen/sdk-codegen/aws-models/evidently.json @@ -0,0 +1,5309 @@ +{ + "smithy": "1.0", + "shapes": { + "com.amazonaws.evidently#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      You do not have sufficient permissions to perform this action.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.evidently#Arn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "arn:[^:]*:[^:]*:[^:]*:[^:]*:.*" + } + }, + "com.amazonaws.evidently#BatchEvaluateFeature": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#BatchEvaluateFeatureRequest" + }, + "output": { + "target": "com.amazonaws.evidently#BatchEvaluateFeatureResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation assigns feature variations to user sessions. For each user session, you pass\n in an entityID that represents the user. Evidently then checks the evaluation\n rules and assigns the variation.

                                                                      \n

                                                                      The first rules that are evaluated are the override rules. If the user's\n entityID matches an override rule, the user is served the variation specified\n by that rule.

                                                                      \n

                                                                      Next, if there is a launch of the feature, the user might be assigned to a variation in\n the launch. The chance of this depends on the percentage of users that are allocated to that\n launch. If the user is enrolled in the launch, the variation they are served depends on the\n allocation of the various feature variations used for the launch.

                                                                      \n

                                                                      If the user is not assigned to a launch, and there is an ongoing experiment for this feature, the user might \n be assigned to a variation in the experiment. The chance of this\n depends on the percentage of users that are allocated to that experiment. If the user is enrolled in the experiment, \n the variation they are served depends on the allocation of the various feature variations used for the experiment.

                                                                      \n

                                                                      If the user is not assigned to a launch or experiment, they are served the default variation.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "dataplane." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/evaluations", + "code": 200 + }, + "smithy.api#tags": ["dataplane"] + } + }, + "com.amazonaws.evidently#BatchEvaluateFeatureRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the feature being evaluated.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "requests": { + "target": "com.amazonaws.evidently#EvaluationRequestsList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures, where each structure assigns a feature variation to one user session.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#BatchEvaluateFeatureResponse": { + "type": "structure", + "members": { + "results": { + "target": "com.amazonaws.evidently#EvaluationResultsList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures, where each structure displays the results of one feature evaluation\n assignment to one user session.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#ChangeDirectionEnum": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "INCREASE", + "name": "INCREASE" + }, + { + "value": "DECREASE", + "name": "DECREASE" + } + ] + } + }, + "com.amazonaws.evidently#CloudWatchLogsDestination": { + "type": "structure", + "members": { + "logGroup": { + "target": "com.amazonaws.evidently#CwLogGroupSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the log group where the project stores evaluation\n events.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the CloudWatch Logs log group where the project stores evaluation\n events.

                                                                      " + } + }, + "com.amazonaws.evidently#CloudWatchLogsDestinationConfig": { + "type": "structure", + "members": { + "logGroup": { + "target": "com.amazonaws.evidently#CwLogGroupSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the log group where the project stores evaluation\n events.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the CloudWatch Logs log group where the project stores evaluation\n events.

                                                                      " + } + }, + "com.amazonaws.evidently#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the resource that caused the exception.

                                                                      " + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the resource that is associated with the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A resource was in an inconsistent state during an update or a deletion.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.evidently#CreateExperiment": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#CreateExperimentRequest" + }, + "output": { + "target": "com.amazonaws.evidently#CreateExperimentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates an Evidently experiment. Before you create an experiment,\n you must create the feature to use for the experiment.

                                                                      \n

                                                                      An experiment helps you make feature design \n decisions based on evidence and data. An experiment can test as\n many as five variations at once. Evidently collects experiment data and analyzes it by statistical methods, and provides\n clear recommendations about which variations perform better.

                                                                      \n

                                                                      Don't use this operation to update an existing experiment. Instead, use \n UpdateExperiment.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/experiments", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.evidently#CreateExperimentRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that you want to create the new experiment in.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#ExperimentName", + "traits": { + "smithy.api#documentation": "

                                                                      A name for the new experiment.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      An optional description of the experiment.

                                                                      " + } + }, + "treatments": { + "target": "com.amazonaws.evidently#TreatmentConfigList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that describe the configuration of each feature variation used in the experiment.

                                                                      ", + "smithy.api#required": {} + } + }, + "metricGoals": { + "target": "com.amazonaws.evidently#MetricGoalConfigList", + "traits": { + "smithy.api#documentation": "

An array of structures that define the metrics used for the experiment, and whether a higher\n or lower value for each metric is the goal.

                                                                      ", + "smithy.api#required": {} + } + }, + "randomizationSalt": { + "target": "com.amazonaws.evidently#RandomizationSalt", + "traits": { + "smithy.api#documentation": "

                                                                      When Evidently assigns a particular user session to an experiment, it must use a randomization ID\n to determine which variation the user session is served. This randomization ID is a combination of the entity ID\n and randomizationSalt. If you omit randomizationSalt, Evidently uses\n the experiment name as the randomizationSalt.

                                                                      " + } + }, + "samplingRate": { + "target": "com.amazonaws.evidently#SplitWeight", + "traits": { + "smithy.api#box": {}, + "smithy.api#documentation": "

                                                                      The portion of the available audience that you want to allocate to this experiment, in thousandths of a percent. The available audience\n is the total audience minus the audience that you have allocated to overrides or current launches of\n this feature.

                                                                      \n

                                                                      This is represented in thousandths of a percent. For example, specify 10,000 to allocate 10% of the available audience.

                                                                      " + } + }, + "onlineAbConfig": { + "target": "com.amazonaws.evidently#OnlineAbConfig", + "traits": { + "smithy.api#documentation": "

A structure that contains the configuration of which variation to use as the \"control\"\n version. The \"control\" version is used for comparison with other variations. This structure\n also specifies how much experiment traffic is allocated to each variation.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      Assigns one or more tags (key-value pairs) to the experiment.

                                                                      \n

                                                                      Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user\n permission to access or change only resources with certain tag values.

                                                                      \n

                                                                      Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                                                      \n \n

                                                                      You can associate as many as 50 tags with an experiment.

                                                                      \n

                                                                      For more information, see Tagging Amazon Web Services resources.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#CreateExperimentResponse": { + "type": "structure", + "members": { + "experiment": { + "target": "com.amazonaws.evidently#Experiment", + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the configuration details of the experiment\n that you created.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#CreateFeature": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#CreateFeatureRequest" + }, + "output": { + "target": "com.amazonaws.evidently#CreateFeatureResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates an Evidently feature that you want to launch or test. You can define up to \n five variations of a feature, and use these variations in your launches and experiments. A feature must be created in\n a project. For information about creating a project, see CreateProject.

                                                                      \n

                                                                      Don't use this operation to update an existing feature. Instead, use \n UpdateFeature.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/features", + "code": 200 + } + } + }, + "com.amazonaws.evidently#CreateFeatureRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that is to contain the new feature.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name for the new feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "evaluationStrategy": { + "target": "com.amazonaws.evidently#FeatureEvaluationStrategy", + "traits": { + "smithy.api#documentation": "

                                                                      Specify ALL_RULES to activate the traffic allocation specified by any\n ongoing launches or experiments. Specify DEFAULT_VARIATION to serve the default\n variation to all users instead.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      An optional description of the feature.

                                                                      " + } + }, + "variations": { + "target": "com.amazonaws.evidently#VariationConfigsList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that contain the configuration of the feature's different variations.

                                                                      ", + "smithy.api#required": {} + } + }, + "defaultVariation": { + "target": "com.amazonaws.evidently#VariationName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the variation to use as the default variation. The default\n variation is served to users who are not allocated to any ongoing launches\n or experiments of this feature.

                                                                      \n

                                                                      This variation must also be listed in the variations structure.

                                                                      \n

                                                                      If you omit defaultVariation, the first variation listed in\n the variations structure is used as the default variation.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      Assigns one or more tags (key-value pairs) to the feature.

                                                                      \n

                                                                      Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user\n permission to access or change only resources with certain tag values.

                                                                      \n

                                                                      Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                                                      \n \n

                                                                      You can associate as many as 50 tags with a feature.

                                                                      \n

                                                                      For more information, see Tagging Amazon Web Services resources.

                                                                      " + } + }, + "entityOverrides": { + "target": "com.amazonaws.evidently#EntityOverrideMap", + "traits": { + "smithy.api#documentation": "

Specify users that should always be served a specific variation of a feature. Each user\n is specified by a key-value pair. For each key, specify a user by entering their user ID,\n account ID, or some other identifier. For the value, specify the name of the variation that\n they are to be served.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#CreateFeatureResponse": { + "type": "structure", + "members": { + "feature": { + "target": "com.amazonaws.evidently#Feature", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the new feature.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#CreateLaunch": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#CreateLaunchRequest" + }, + "output": { + "target": "com.amazonaws.evidently#CreateLaunchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a launch of a given feature. Before you create a launch, you\n must create the feature to use for the launch.

                                                                      \n

                                                                      You can use a launch to safely validate new features by serving them to a specified\n percentage of your users while you roll out the feature. You can monitor the performance of\n the new feature to help you decide when to ramp up traffic to more users. This helps you\n reduce risk and identify unintended consequences before you fully launch the feature.

                                                                      \n

                                                                      Don't use this operation to update an existing launch. Instead, use \n UpdateLaunch.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/launches", + "code": 200 + } + } + }, + "com.amazonaws.evidently#CreateLaunchRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that you want to create the launch in.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#LaunchName", + "traits": { + "smithy.api#documentation": "

                                                                      The name for the new launch.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      An optional description for the launch.

                                                                      " + } + }, + "scheduledSplitsConfig": { + "target": "com.amazonaws.evidently#ScheduledSplitsLaunchConfig", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the traffic allocation percentages among the feature\n variations during each step of the launch.

                                                                      " + } + }, + "metricMonitors": { + "target": "com.amazonaws.evidently#MetricMonitorConfigList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the metrics that will be used to monitor \n the launch performance.

                                                                      " + } + }, + "groups": { + "target": "com.amazonaws.evidently#LaunchGroupConfigList", + "traits": { + "smithy.api#documentation": "

An array of structures that contain the feature and variations that are to be used for the launch.

                                                                      ", + "smithy.api#required": {} + } + }, + "randomizationSalt": { + "target": "com.amazonaws.evidently#RandomizationSalt", + "traits": { + "smithy.api#documentation": "

When Evidently assigns a particular user session to a launch, it must use a randomization ID\n to determine which variation the user session is served. This randomization ID is a combination of the entity ID\n and randomizationSalt. If you omit randomizationSalt, Evidently uses\n the launch name as the randomizationSalt.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      Assigns one or more tags (key-value pairs) to the launch.

                                                                      \n

                                                                      Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user\n permission to access or change only resources with certain tag values.

                                                                      \n

                                                                      Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                                                      \n \n

                                                                      You can associate as many as 50 tags with a launch.

                                                                      \n

                                                                      For more information, see Tagging Amazon Web Services resources.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#CreateLaunchResponse": { + "type": "structure", + "members": { + "launch": { + "target": "com.amazonaws.evidently#Launch", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains the configuration of the launch that was created.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#CreateProject": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#CreateProjectRequest" + }, + "output": { + "target": "com.amazonaws.evidently#CreateProjectResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a project, which is the logical object in Evidently that can contain features, launches, and \n experiments. Use projects to group similar features together.

                                                                      \n

                                                                      To update an existing project, use UpdateProject.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects", + "code": 200 + } + } + }, + "com.amazonaws.evidently#CreateProjectRequest": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.evidently#ProjectName", + "traits": { + "smithy.api#documentation": "

                                                                      The name for the project.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      An optional description of the project.

                                                                      " + } + }, + "dataDelivery": { + "target": "com.amazonaws.evidently#ProjectDataDeliveryConfig", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about where Evidently is to store\n evaluation events for longer term storage, if you choose to do so. If you choose\n not to store these events, Evidently deletes them after using them to produce metrics and other experiment\n results that you can view.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      Assigns one or more tags (key-value pairs) to the project.

                                                                      \n

                                                                      Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user\n permission to access or change only resources with certain tag values.

                                                                      \n

                                                                      Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                                                      \n \n

                                                                      You can associate as many as 50 tags with a project.

                                                                      \n

                                                                      For more information, see Tagging Amazon Web Services resources.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#CreateProjectResponse": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#Project", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the created project.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#CwDimensionSafeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[\\S]+$" + } + }, + "com.amazonaws.evidently#CwLogGroupSafeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": "^[-a-zA-Z0-9._/]+$" + } + }, + "com.amazonaws.evidently#DeleteExperiment": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#DeleteExperimentRequest" + }, + "output": { + "target": "com.amazonaws.evidently#DeleteExperimentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#InternalServerException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ServiceUnavailableException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes an Evidently experiment. The feature used for the experiment is not deleted.

                                                                      \n

                                                                      To stop an experiment without deleting it, use StopExperiment.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/projects/{project}/experiments/{experiment}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.evidently#DeleteExperimentRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the experiment to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "experiment": { + "target": "com.amazonaws.evidently#ExperimentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the experiment to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#DeleteExperimentResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.evidently#DeleteFeature": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#DeleteFeatureRequest" + }, + "output": { + "target": "com.amazonaws.evidently#DeleteFeatureResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes an Evidently feature.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/projects/{project}/features/{feature}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.evidently#DeleteFeatureRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the feature to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "feature": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the feature to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#DeleteFeatureResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.evidently#DeleteLaunch": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#DeleteLaunchRequest" + }, + "output": { + "target": "com.amazonaws.evidently#DeleteLaunchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Evidently launch. The feature used for the launch is not deleted.

To stop a launch without deleting it, use StopLaunch.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/projects/{project}/launches/{launch}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.evidently#DeleteLaunchRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the launch to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "launch": { + "target": "com.amazonaws.evidently#LaunchName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the launch to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#DeleteLaunchResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.evidently#DeleteProject": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#DeleteProjectRequest" + }, + "output": { + "target": "com.amazonaws.evidently#DeleteProjectResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Deletes an Evidently project. Before you can delete a project, you must delete all the features that the project contains. To delete a feature, use DeleteFeature.
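Because a project can only be deleted after every feature in it has been deleted, a typical caller removes the features first. The following is a minimal TypeScript sketch of that order of operations with the generated client; the region, project name, and the ListFeatures pagination loop are illustrative assumptions, not part of the model above.

import {
  EvidentlyClient,
  ListFeaturesCommand,
  DeleteFeatureCommand,
  DeleteProjectCommand,
} from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" }); // hypothetical region
const project = "my-project";                                // hypothetical project name

async function deleteProjectAndFeatures(): Promise<void> {
  // A project can only be deleted once it contains no features,
  // so list and delete the features first.
  let nextToken: string | undefined;
  do {
    const page = await client.send(new ListFeaturesCommand({ project, nextToken }));
    for (const feature of page.features ?? []) {
      await client.send(new DeleteFeatureCommand({ project, feature: feature.name }));
    }
    nextToken = page.nextToken;
  } while (nextToken);

  await client.send(new DeleteProjectCommand({ project }));
}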

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/projects/{project}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.evidently#DeleteProjectRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#DeleteProjectResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.evidently#Description": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 160 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.evidently#DoubleValueList": { + "type": "list", + "member": { + "target": "smithy.api#Double" + }, + "traits": { + "smithy.api#length": { + "max": 100800 + } + } + }, + "com.amazonaws.evidently#EntityId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.evidently#EntityOverrideMap": { + "type": "map", + "key": { + "target": "com.amazonaws.evidently#EntityId" + }, + "value": { + "target": "com.amazonaws.evidently#VariationName" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, + "com.amazonaws.evidently#ErrorCodeEnum": { + "type": "string", + "traits": { + "smithy.api#documentation": "Recommended errors from AWS API standards: https://w.amazon.com/bin/view/AWS/API_Standards/Exceptions\nErrorCode = \"ValidationException\"|\"ServiceQuotaExceededException\"|\"AccessDeniedException\"|\"ResourceNotFoundException\"|\"ConflictException\"|\"ThrottlingException\"|\"InternalServerException\"|string;", + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.evidently#ErrorMessage": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.evidently#EvaluateFeature": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#EvaluateFeatureRequest" + }, + "output": { + "target": "com.amazonaws.evidently#EvaluateFeatureResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

This operation assigns a feature variation to one given user session. You pass in an entityID that represents the user. Evidently then checks the evaluation rules and assigns the variation.

The first rules that are evaluated are the override rules. If the user's entityID matches an override rule, the user is served the variation specified by that rule.

Next, if there is a launch of the feature, the user might be assigned to a variation in the launch. The chance of this depends on the percentage of users that are allocated to that launch. If the user is enrolled in the launch, the variation they are served depends on the allocation of the various feature variations used for the launch.

If the user is not assigned to a launch, and there is an ongoing experiment for this feature, the user might be assigned to a variation in the experiment. The chance of this depends on the percentage of users that are allocated to that experiment. If the user is enrolled in the experiment, the variation they are served depends on the allocation of the various feature variations used for the experiment.

If the user is not assigned to a launch or experiment, they are served the default variation.
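A minimal sketch of how a caller might exercise this evaluation order with the generated TypeScript client; the region, project, feature, and evaluationContext payload are hypothetical, and evaluationContext is passed as a JSON string per the JsonValue shape.

import { EvidentlyClient, EvaluateFeatureCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" }); // hypothetical region

// Evaluate the feature for one user session; Evidently applies overrides,
// then launch allocation, then experiment allocation, then the default variation.
const response = await client.send(
  new EvaluateFeatureCommand({
    project: "my-project",        // hypothetical project name
    feature: "checkout-redesign", // hypothetical feature name
    entityId: "user-1234",
    evaluationContext: JSON.stringify({ plan: "premium" }), // optional JSON attributes
  })
);

console.log(response.variation, response.reason); // e.g. reason DEFAULT or LAUNCH_RULE_MATCH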

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "dataplane." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/evaluations/{feature}", + "code": 200 + }, + "smithy.api#tags": ["dataplane"] + } + }, + "com.amazonaws.evidently#EvaluateFeatureRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains this feature.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "feature": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the feature being evaluated.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entityId": { + "target": "com.amazonaws.evidently#EntityId", + "traits": { + "smithy.api#documentation": "

An internal ID that represents a unique user of the application. This entityID is checked against any override rules assigned for this feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "evaluationContext": { + "target": "com.amazonaws.evidently#JsonValue", + "traits": { + "smithy.api#documentation": "

A JSON block of attributes that you can optionally pass in. This JSON block is included in the evaluation events sent to Evidently from the user session.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#EvaluateFeatureResponse": { + "type": "structure", + "members": { + "variation": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the variation that was served to the user session.

                                                                      " + } + }, + "value": { + "target": "com.amazonaws.evidently#VariableValue", + "traits": { + "smithy.api#documentation": "

                                                                      The value assigned to this variation to differentiate it from the other variations of this feature.

                                                                      " + } + }, + "reason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Specifies the reason that the user session was assigned this variation. Possible values include DEFAULT, meaning the user was served the default variation; LAUNCH_RULE_MATCH, if the user session was enrolled in a launch; EXPERIMENT_RULE_MATCH, if the user session was enrolled in an experiment; or ENTITY_OVERRIDES_MATCH, if the user's entityId matches an override rule.

                                                                      " + } + }, + "details": { + "target": "com.amazonaws.evidently#JsonValue", + "traits": { + "smithy.api#documentation": "

                                                                      If this user was assigned to a launch or experiment, this field lists the launch or experiment name.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#EvaluationRequest": { + "type": "structure", + "members": { + "feature": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the feature being evaluated.

                                                                      ", + "smithy.api#required": {} + } + }, + "entityId": { + "target": "com.amazonaws.evidently#EntityId", + "traits": { + "smithy.api#documentation": "

An internal ID that represents a unique user session of the application. This entityID is checked against any override rules assigned for this feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "evaluationContext": { + "target": "com.amazonaws.evidently#JsonValue", + "traits": { + "smithy.api#documentation": "

A JSON block of attributes that you can optionally pass in. This JSON block is included in the evaluation events sent to Evidently from the user session.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure assigns a feature variation to one user session.

                                                                      " + } + }, + "com.amazonaws.evidently#EvaluationRequestsList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#EvaluationRequest" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + } + } + }, + "com.amazonaws.evidently#EvaluationResult": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the feature being evaluated.

                                                                      " + } + }, + "feature": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the feature being evaluated.

                                                                      ", + "smithy.api#required": {} + } + }, + "variation": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the variation that was served to the user session.

                                                                      " + } + }, + "value": { + "target": "com.amazonaws.evidently#VariableValue", + "traits": { + "smithy.api#documentation": "

                                                                      The value assigned to this variation to differentiate it from the other variations of this feature.

                                                                      " + } + }, + "entityId": { + "target": "com.amazonaws.evidently#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      An internal ID that represents a unique user session of the application.

                                                                      ", + "smithy.api#required": {} + } + }, + "reason": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

Specifies the reason that the user session was assigned this variation. Possible values include DEFAULT, meaning the user was served the default variation; LAUNCH_RULE_MATCH, if the user session was enrolled in a launch; or EXPERIMENT_RULE_MATCH, if the user session was enrolled in an experiment.

                                                                      " + } + }, + "details": { + "target": "com.amazonaws.evidently#JsonValue", + "traits": { + "smithy.api#documentation": "

                                                                      If this user was assigned to a launch or experiment, this field lists the launch or experiment name.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

This structure displays the results of one feature evaluation assignment to one user session.

                                                                      " + } + }, + "com.amazonaws.evidently#EvaluationResultsList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#EvaluationResult" + } + }, + "com.amazonaws.evidently#EvaluationRule": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.evidently#RuleName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the experiment or launch.

                                                                      " + } + }, + "type": { + "target": "com.amazonaws.evidently#RuleType", + "traits": { + "smithy.api#documentation": "

This value is aws.evidently.splits if this is an evaluation rule for a launch, and it is aws.evidently.onlineab if this is an evaluation rule for an experiment.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains the information about an evaluation rule for this feature, if it is used in a launch or experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#EvaluationRulesList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#EvaluationRule" + } + }, + "com.amazonaws.evidently#Event": { + "type": "structure", + "members": { + "timestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The timestamp of the event.

                                                                      ", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.evidently#EventType", + "traits": { + "smithy.api#documentation": "

aws.evidently.evaluation specifies an evaluation event, which determines which feature variation a user sees. aws.evidently.custom specifies a custom event, which generates metrics from user actions such as clicks and checkouts.

                                                                      ", + "smithy.api#required": {} + } + }, + "data": { + "target": "com.amazonaws.evidently#JsonValue", + "traits": { + "smithy.api#documentation": "

                                                                      The event data.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains the information about one evaluation event or custom event sent to Evidently. This is a JSON payload. If this event specifies a pre-defined event type, the payload must follow the defined event schema.
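A hedged sketch of sending one custom event with the generated client; the project name, region, and the shape of the data payload are illustrative assumptions (only the timestamp, type, and data members come from the Event structure above).

import { EvidentlyClient, PutProjectEventsCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" }); // hypothetical region

// Send one custom event; the data payload is a JSON string and, for
// pre-defined event types, must follow the corresponding event schema.
await client.send(
  new PutProjectEventsCommand({
    project: "my-project", // hypothetical project name
    events: [
      {
        timestamp: new Date(),
        type: "aws.evidently.custom",
        // Hypothetical payload shape for a custom metric event.
        data: JSON.stringify({ userDetails: { userId: "user-1234" }, details: { checkoutValue: 42.5 } }),
      },
    ],
  })
);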

                                                                      " + } + }, + "com.amazonaws.evidently#EventList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#Event" + }, + "traits": { + "smithy.api#length": { + "max": 50 + } + } + }, + "com.amazonaws.evidently#EventType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "aws.evidently.evaluation", + "name": "EVALUATION" + }, + { + "value": "aws.evidently.custom", + "name": "CUSTOM" + } + ] + } + }, + "com.amazonaws.evidently#Evidently": { + "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Evidently", + "arnNamespace": "evidently", + "cloudFormationName": "Evidently", + "cloudTrailEventSource": "evidently.amazonaws.com", + "endpointPrefix": "evidently" + }, + "aws.auth#sigv4": { + "name": "evidently" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "Authorization", + "x-amz-content-sha256", + "X-Amz-Date", + "X-Amz-Security-Token", + "X-Api-Key", + "Content-Type", + "X-Amz-User-Agent" + ] + }, + "smithy.api#documentation": "

You can use Amazon CloudWatch Evidently to safely validate new features by serving them to a specified percentage of your users while you roll out the feature. You can monitor the performance of the new feature to help you decide when to ramp up traffic to your users. This helps you reduce risk and identify unintended consequences before you fully launch the feature.

You can also conduct A/B experiments to make feature design decisions based on evidence and data. An experiment can test as many as five variations at once. Evidently collects experiment data and analyzes it using statistical methods. It also provides clear recommendations about which variations perform better. You can test both user-facing features and backend features.

                                                                      ", + "smithy.api#title": "Amazon CloudWatch Evidently" + }, + "version": "2021-02-01", + "operations": [ + { + "target": "com.amazonaws.evidently#ListTagsForResource" + }, + { + "target": "com.amazonaws.evidently#TagResource" + }, + { + "target": "com.amazonaws.evidently#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.evidently#ProjectResource" + } + ] + }, + "com.amazonaws.evidently#Experiment": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.evidently#ExperimentArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the experiment.

                                                                      ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#ExperimentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the experiment.

                                                                      ", + "smithy.api#required": {} + } + }, + "project": { + "target": "com.amazonaws.evidently#ProjectArn", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains this experiment.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.evidently#ExperimentStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the experiment.

                                                                      ", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

If the experiment was stopped, this is the string that was entered by the person who stopped the experiment, to explain why it was stopped.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      A description of the experiment.

                                                                      " + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the experiment is first created.

                                                                      ", + "smithy.api#required": {} + } + }, + "lastUpdatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the experiment was most recently updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "schedule": { + "target": "com.amazonaws.evidently#ExperimentSchedule", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains the time and date that Evidently completed the analysis of the experiment.

                                                                      " + } + }, + "execution": { + "target": "com.amazonaws.evidently#ExperimentExecution", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains the date and time that the experiment started and ended.

                                                                      " + } + }, + "treatments": { + "target": "com.amazonaws.evidently#TreatmentList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that describe the configuration of each feature variation used in the experiment.

                                                                      " + } + }, + "metricGoals": { + "target": "com.amazonaws.evidently#MetricGoalsList", + "traits": { + "smithy.api#documentation": "

An array of structures that define the metrics used for the experiment, and whether a higher or lower value for each metric is the goal.

                                                                      " + } + }, + "randomizationSalt": { + "target": "com.amazonaws.evidently#RandomizationSalt", + "traits": { + "smithy.api#documentation": "

This value is used when Evidently assigns a particular user session to the experiment. It helps create a randomization ID to determine which variation the user session is served. This randomization ID is a combination of the entity ID and randomizationSalt.

                                                                      " + } + }, + "samplingRate": { + "target": "com.amazonaws.evidently#SplitWeight", + "traits": { + "smithy.api#documentation": "

In thousandths of a percent, the amount of the available audience that is allocated to this experiment. The available audience is the total audience minus the audience that you have allocated to overrides or current launches of this feature.

This is represented in thousandths of a percent, so a value of 10,000 is 10% of the available audience.
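A small illustrative helper for converting an audience fraction into the thousandths-of-a-percent units that samplingRate uses; the function name is made up for this sketch.

// samplingRate is expressed in thousandths of a percent:
// 10% of the available audience -> 10 * 1000 = 10000.
const toSamplingRate = (fraction: number): number => Math.round(fraction * 100 * 1000);

console.log(toSamplingRate(0.10));  // 10000
console.log(toSamplingRate(0.025)); // 2500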

                                                                      " + } + }, + "type": { + "target": "com.amazonaws.evidently#ExperimentType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of this experiment. Currently, this value must be aws.experiment.onlineab.

                                                                      ", + "smithy.api#required": {} + } + }, + "onlineAbDefinition": { + "target": "com.amazonaws.evidently#OnlineAbDefinition", + "traits": { + "smithy.api#documentation": "

A structure that contains the configuration of which variation to use as the \"control\" version. The \"control\" version is used for comparison with other variations. This structure also specifies how much experiment traffic is allocated to each variation.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with this experiment.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the configuration details of an experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#ExperimentArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*/experiment/[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#ExperimentBaseStat": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Mean", + "name": "MEAN" + } + ] + } + }, + "com.amazonaws.evidently#ExperimentExecution": { + "type": "structure", + "members": { + "startedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the experiment started.

                                                                      " + } + }, + "endedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the experiment ended.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure contains the date and time that the experiment started and ended.

                                                                      " + } + }, + "com.amazonaws.evidently#ExperimentList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#Experiment" + } + }, + "com.amazonaws.evidently#ExperimentName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 127 + }, + "smithy.api#pattern": "[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#ExperimentReport": { + "type": "structure", + "members": { + "metricName": { + "target": "com.amazonaws.evidently#CwDimensionSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the metric that is analyzed in this experiment report.

                                                                      " + } + }, + "treatmentName": { + "target": "com.amazonaws.evidently#TreatmentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the variation that this report pertains to.

                                                                      " + } + }, + "reportName": { + "target": "com.amazonaws.evidently#ExperimentReportName", + "traits": { + "smithy.api#documentation": "

                                                                      The type of analysis used for this report.

                                                                      " + } + }, + "content": { + "target": "com.amazonaws.evidently#JsonValue", + "traits": { + "smithy.api#documentation": "

                                                                      The content of the report.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains results of an experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#ExperimentReportList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#ExperimentReport" + }, + "traits": { + "smithy.api#length": { + "max": 1000 + } + } + }, + "com.amazonaws.evidently#ExperimentReportName": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BayesianInference", + "name": "BAYESIAN_INFERENCE" + } + ] + } + }, + "com.amazonaws.evidently#ExperimentReportNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#ExperimentReportName" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, + "com.amazonaws.evidently#ExperimentResource": { + "type": "resource", + "identifiers": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef" + }, + "experiment": { + "target": "com.amazonaws.evidently#ExperimentName" + } + }, + "create": { + "target": "com.amazonaws.evidently#CreateExperiment" + }, + "read": { + "target": "com.amazonaws.evidently#GetExperiment" + }, + "update": { + "target": "com.amazonaws.evidently#UpdateExperiment" + }, + "delete": { + "target": "com.amazonaws.evidently#DeleteExperiment" + }, + "list": { + "target": "com.amazonaws.evidently#ListExperiments" + }, + "operations": [ + { + "target": "com.amazonaws.evidently#GetExperimentResults" + }, + { + "target": "com.amazonaws.evidently#StartExperiment" + }, + { + "target": "com.amazonaws.evidently#StopExperiment" + } + ] + }, + "com.amazonaws.evidently#ExperimentResultRequestType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BaseStat", + "name": "BASE_STAT" + }, + { + "value": "TreatmentEffect", + "name": "TREATMENT_EFFECT" + }, + { + "value": "ConfidenceInterval", + "name": "CONFIDENCE_INTERVAL" + }, + { + "value": "PValue", + "name": "P_VALUE" + } + ] + } + }, + "com.amazonaws.evidently#ExperimentResultRequestTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#ExperimentResultRequestType" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, + "com.amazonaws.evidently#ExperimentResultResponseType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Mean", + "name": "MEAN" + }, + { + "value": "TreatmentEffect", + "name": "TREATMENT_EFFECT" + }, + { + "value": "ConfidenceIntervalUpperBound", + "name": "CONFIDENCE_INTERVAL_UPPERBOUND" + }, + { + "value": "ConfidenceIntervalLowerBound", + "name": "CONFIDENCE_INTERVAL_LOWERBOUND" + }, + { + "value": "PValue", + "name": "P_VALUE" + } + ] + } + }, + "com.amazonaws.evidently#ExperimentResultsData": { + "type": "structure", + "members": { + "metricName": { + "target": "com.amazonaws.evidently#CwDimensionSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the metric.

                                                                      " + } + }, + "treatmentName": { + "target": "com.amazonaws.evidently#TreatmentName", + "traits": { + "smithy.api#documentation": "

                                                                      The treatment, or variation, that returned the values in this structure.

                                                                      " + } + }, + "resultStat": { + "target": "com.amazonaws.evidently#ExperimentResultResponseType", + "traits": { + "smithy.api#documentation": "

                                                                      The experiment statistic that these results pertain to.

                                                                      " + } + }, + "values": { + "target": "com.amazonaws.evidently#DoubleValueList", + "traits": { + "smithy.api#documentation": "

                                                                      The values for the metricName that were recorded in the experiment.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains experiment results for one metric that is monitored in the experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#ExperimentResultsDataList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#ExperimentResultsData" + }, + "traits": { + "smithy.api#length": { + "max": 1000 + } + } + }, + "com.amazonaws.evidently#ExperimentSchedule": { + "type": "structure", + "members": { + "analysisCompleteTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The time and date that Evidently completed the analysis of the experiment.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure contains the time and date that Evidently completed the analysis of the experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#ExperimentStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATED", + "name": "CREATED" + }, + { + "value": "UPDATING", + "name": "UPDATING" + }, + { + "value": "RUNNING", + "name": "RUNNING" + }, + { + "value": "COMPLETED", + "name": "COMPLETED" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + } + ] + } + }, + "com.amazonaws.evidently#ExperimentStopDesiredState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "COMPLETED", + "name": "COMPLETED" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + } + ] + } + }, + "com.amazonaws.evidently#ExperimentType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "aws.evidently.onlineab", + "name": "ONLINE_AB_EXPERIMENT" + } + ] + } + }, + "com.amazonaws.evidently#Feature": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.evidently#FeatureArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "project": { + "target": "com.amazonaws.evidently#ProjectArn", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the feature.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.evidently#FeatureStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the feature is created.

                                                                      ", + "smithy.api#required": {} + } + }, + "lastUpdatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the feature was most recently updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the feature.

                                                                      " + } + }, + "evaluationStrategy": { + "target": "com.amazonaws.evidently#FeatureEvaluationStrategy", + "traits": { + "smithy.api#documentation": "

If this value is ALL_RULES, the traffic allocation specified by any ongoing launches or experiments is being used. If this is DEFAULT_VARIATION, the default variation is being served to all users.

                                                                      ", + "smithy.api#required": {} + } + }, + "valueType": { + "target": "com.amazonaws.evidently#VariationValueType", + "traits": { + "smithy.api#documentation": "

Defines the type of value used to define the different feature variations. For more information, see Variation types.

                                                                      ", + "smithy.api#required": {} + } + }, + "variations": { + "target": "com.amazonaws.evidently#VariationsList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that contain the configuration of the feature's different variations.

                                                                      ", + "smithy.api#required": {} + } + }, + "defaultVariation": { + "target": "com.amazonaws.evidently#VariationName", + "traits": { + "smithy.api#documentation": "

The name of the variation that is used as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature.

This variation must also be listed in the variations structure.

If you omit defaultVariation, the first variation listed in the variations structure is used as the default variation.

                                                                      " + } + }, + "evaluationRules": { + "target": "com.amazonaws.evidently#EvaluationRulesList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the evaluation rules for the feature.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with this feature.

                                                                      " + } + }, + "entityOverrides": { + "target": "com.amazonaws.evidently#EntityOverrideMap", + "traits": { + "smithy.api#documentation": "

A set of key-value pairs that specify users who should always be served a specific variation of a feature. Each key specifies a user using their user ID, account ID, or some other identifier. The value specifies the name of the variation that the user is to be served.

For the override to be successful, the value of the key must match the entityId used in the EvaluateFeature operation.
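A sketch, under the assumption that CreateFeature accepts variations, defaultVariation, and entityOverrides as modeled for this service, showing how an override key lines up with the entityId later passed to EvaluateFeature; all names and values are hypothetical.

import { EvidentlyClient, CreateFeatureCommand, EvaluateFeatureCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" }); // hypothetical region

// Pin one user to the "beta" variation via an override. The map key must be
// the same identifier that is later passed as entityId to EvaluateFeature.
await client.send(
  new CreateFeatureCommand({
    project: "my-project",           // hypothetical names throughout
    name: "checkout-redesign",
    variations: [
      { name: "current", value: { stringValue: "old-flow" } },
      { name: "beta", value: { stringValue: "new-flow" } },
    ],
    defaultVariation: "current",
    entityOverrides: { "user-1234": "beta" },
  })
);

// This evaluation should return the "beta" variation with reason ENTITY_OVERRIDES_MATCH.
await client.send(
  new EvaluateFeatureCommand({ project: "my-project", feature: "checkout-redesign", entityId: "user-1234" })
);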

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure contains information about one Evidently feature in your account.

                                                                      " + } + }, + "com.amazonaws.evidently#FeatureArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*/feature/[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#FeatureEvaluationStrategy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALL_RULES", + "name": "ALL_RULES" + }, + { + "value": "DEFAULT_VARIATION", + "name": "DEFAULT_VARIATION" + } + ] + } + }, + "com.amazonaws.evidently#FeatureName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 127 + }, + "smithy.api#pattern": "[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#FeatureResource": { + "type": "resource", + "identifiers": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef" + }, + "feature": { + "target": "com.amazonaws.evidently#FeatureName" + } + }, + "create": { + "target": "com.amazonaws.evidently#CreateFeature" + }, + "read": { + "target": "com.amazonaws.evidently#GetFeature" + }, + "update": { + "target": "com.amazonaws.evidently#UpdateFeature" + }, + "delete": { + "target": "com.amazonaws.evidently#DeleteFeature" + }, + "list": { + "target": "com.amazonaws.evidently#ListFeatures" + } + }, + "com.amazonaws.evidently#FeatureStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AVAILABLE", + "name": "AVAILABLE" + }, + { + "value": "UPDATING", + "name": "UPDATING" + } + ] + } + }, + "com.amazonaws.evidently#FeatureSummariesList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#FeatureSummary" + } + }, + "com.amazonaws.evidently#FeatureSummary": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.evidently#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the feature.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.evidently#FeatureStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the feature.

                                                                      ", + "smithy.api#required": {} + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the feature is created.

                                                                      ", + "smithy.api#required": {} + } + }, + "lastUpdatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the feature was most recently updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "evaluationStrategy": { + "target": "com.amazonaws.evidently#FeatureEvaluationStrategy", + "traits": { + "smithy.api#documentation": "

If this value is ALL_RULES, the traffic allocation specified by any ongoing launches or experiments is being used. If this is DEFAULT_VARIATION, the default variation is being served to all users.

                                                                      ", + "smithy.api#required": {} + } + }, + "evaluationRules": { + "target": "com.amazonaws.evidently#EvaluationRulesList", + "traits": { + "smithy.api#documentation": "

An array of structures that define the evaluation rules for the feature.

                                                                      " + } + }, + "defaultVariation": { + "target": "com.amazonaws.evidently#VariationName", + "traits": { + "smithy.api#documentation": "

The name of the variation that is used as the default variation. The default variation is served to users who are not allocated to any ongoing launches or experiments of this feature.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with this feature.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure contains information about one Evidently feature in your account.

                                                                      " + } + }, + "com.amazonaws.evidently#FeatureToVariationMap": { + "type": "map", + "key": { + "target": "com.amazonaws.evidently#FeatureName" + }, + "value": { + "target": "com.amazonaws.evidently#VariationName" + } + }, + "com.amazonaws.evidently#GetExperiment": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#GetExperimentRequest" + }, + "output": { + "target": "com.amazonaws.evidently#GetExperimentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the details about one experiment. You must already know the experiment name. To retrieve a list of experiments in your account, use ListExperiments.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/projects/{project}/experiments/{experiment}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#GetExperimentRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the experiment.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "experiment": { + "target": "com.amazonaws.evidently#ExperimentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the experiment that you want to see the details of.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#GetExperimentResponse": { + "type": "structure", + "members": { + "experiment": { + "target": "com.amazonaws.evidently#Experiment", + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the configuration details of the experiment.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#GetExperimentResults": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#GetExperimentResultsRequest" + }, + "output": { + "target": "com.amazonaws.evidently#GetExperimentResultsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves the results of a running or completed experiment.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/experiments/{experiment}/results", + "code": 200 + } + } + }, + "com.amazonaws.evidently#GetExperimentResultsRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the experiment that you want to see the results of.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "experiment": { + "target": "com.amazonaws.evidently#ExperimentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the experiment to retrieve the results of.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "startTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the experiment started.

                                                                      " + } + }, + "endTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the experiment ended, if it is completed.

                                                                      " + } + }, + "metricNames": { + "target": "com.amazonaws.evidently#MetricNameList", + "traits": { + "smithy.api#documentation": "

                                                                      The names of the experiment metrics that you want to see the results of.

                                                                      ", + "smithy.api#required": {} + } + }, + "treatmentNames": { + "target": "com.amazonaws.evidently#TreatmentNameList", + "traits": { + "smithy.api#documentation": "

                                                                      The names of the experiment treatments that you want to see the results for.

                                                                      ", + "smithy.api#required": {} + } + }, + "baseStat": { + "target": "com.amazonaws.evidently#ExperimentBaseStat", + "traits": { + "smithy.api#documentation": "

The statistic used to calculate experiment results. Currently the only valid value is mean, which uses the mean of the collected values as the statistic.

                                                                      " + } + }, + "resultStats": { + "target": "com.amazonaws.evidently#ExperimentResultRequestTypeList", + "traits": { + "smithy.api#documentation": "

                                                                      The statistics that you want to see in the returned results (a usage sketch follows this list).
                                                                      • PValue specifies to use p-values for the results. A p-value is used in hypothesis testing to measure how often you are willing to make a mistake in rejecting the null hypothesis. A general practice is to reject the null hypothesis and declare that the results are statistically significant when the p-value is less than 0.05.
                                                                      • ConfidenceInterval specifies a confidence interval for the results. The confidence interval represents the range of values for the chosen metric that is likely to contain the true difference between the baseStat of a variation and the baseline. Evidently returns the 95% confidence interval.
                                                                      • TreatmentEffect is the difference in the statistic specified by the baseStat parameter between each variation and the default variation.
                                                                      • BaseStat returns the statistical values collected for the metric for each variation. The statistic uses the same statistic specified in the baseStat parameter. Therefore, if baseStat is mean, this returns the mean of the values collected for each variation.
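The request members above map onto the generated GetExperimentResultsCommand added in this change. A minimal sketch, assuming the package is published as @aws-sdk/client-evidently (matching the clients/client-evidently sources in this patch); the region and the project, experiment, metric, and treatment names are placeholders:

```ts
import { EvidentlyClient, GetExperimentResultsCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" }); // example region

// Request p-values and confidence intervals for one metric, aggregated hourly.
const results = await client.send(
  new GetExperimentResultsCommand({
    project: "my-project",                 // placeholder: name or ARN of the project
    experiment: "my-experiment",           // placeholder: experiment name
    metricNames: ["pageLoadTime"],         // MetricNameList allows exactly one name
    treatmentNames: ["control", "variationA"],
    baseStat: "mean",                      // currently the only valid value
    resultStats: ["PValue", "ConfidenceInterval"],
    reportNames: ["BayesianInference"],
    period: 3600,                          // aggregation window, in seconds
  })
);

console.log(results.resultsData, results.reports, results.timestamps);
```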
                                                                      " + } + }, + "reportNames": { + "target": "com.amazonaws.evidently#ExperimentReportNameList", + "traits": { + "smithy.api#documentation": "

                                                                      The names of the report types that you want to see. Currently, BayesianInference\n is the only valid value.

                                                                      " + } + }, + "period": { + "target": "com.amazonaws.evidently#ResultsPeriod", + "traits": { + "smithy.api#documentation": "

                                                                      The amount of time, in seconds, over which results are aggregated.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#GetExperimentResultsResponse": { + "type": "structure", + "members": { + "resultsData": { + "target": "com.amazonaws.evidently#ExperimentResultsDataList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that include experiment results including metric names and values.

                                                                      " + } + }, + "reports": { + "target": "com.amazonaws.evidently#ExperimentReportList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that include the reports that you requested.

                                                                      " + } + }, + "timestamps": { + "target": "com.amazonaws.evidently#TimestampList", + "traits": { + "smithy.api#documentation": "

                                                                      The timestamps of each result returned.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#GetFeature": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#GetFeatureRequest" + }, + "output": { + "target": "com.amazonaws.evidently#GetFeatureResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns the details about one feature. You must already know the feature name. To\n retrieve a list of features in your account, use ListFeatures.
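As a usage sketch of this operation (the @aws-sdk/client-evidently package name and the project/feature names below are assumptions; the GetFeatureCommand itself is generated in this change):

```ts
import { EvidentlyClient, GetFeatureCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({});

// GET /projects/{project}/features/{feature}
const { feature } = await client.send(
  new GetFeatureCommand({
    project: "my-project", // placeholder: name or ARN of the project
    feature: "my-feature", // placeholder: feature name
  })
);

console.log(feature); // structure containing the feature's configuration details
```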

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/projects/{project}/features/{feature}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#GetFeatureRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the feature.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "feature": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the feature that you want to retrieve information for.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#GetFeatureResponse": { + "type": "structure", + "members": { + "feature": { + "target": "com.amazonaws.evidently#Feature", + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the configuration details of the feature.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#GetLaunch": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#GetLaunchRequest" + }, + "output": { + "target": "com.amazonaws.evidently#GetLaunchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns the details about one launch. You must already know the \n launch name. To retrieve a list of launches in your account, use ListLaunches.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/projects/{project}/launches/{launch}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#GetLaunchRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the launch.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "launch": { + "target": "com.amazonaws.evidently#LaunchName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the launch that you want to see the details of.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#GetLaunchResponse": { + "type": "structure", + "members": { + "launch": { + "target": "com.amazonaws.evidently#Launch", + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the configuration details of the launch.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#GetProject": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#GetProjectRequest" + }, + "output": { + "target": "com.amazonaws.evidently#GetProjectResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns the details about one project. You must already know the project name. To retrieve a list of projects in your account, use ListProjects.
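A corresponding sketch for this operation (same package-name assumption; the project name is a placeholder), showing how the Project structure described later in this model is returned:

```ts
import { EvidentlyClient, GetProjectCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({});

// GET /projects/{project}
const { project } = await client.send(
  new GetProjectCommand({ project: "my-project" }) // placeholder: name or ARN
);

// Project carries status, counts, and data-delivery settings.
console.log(project?.status, project?.activeLaunchCount, project?.dataDelivery);
```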

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/projects/{project}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#GetProjectRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that you want to see the details of.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#GetProjectResponse": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#Project", + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the configuration details of the project.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#GroupName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 127 + }, + "smithy.api#pattern": "[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#GroupToWeightMap": { + "type": "map", + "key": { + "target": "com.amazonaws.evidently#GroupName" + }, + "value": { + "target": "com.amazonaws.evidently#SplitWeight" + }, + "traits": { + "smithy.api#length": { + "max": 5 + } + } + }, + "com.amazonaws.evidently#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Unexpected error while processing the request. Retry the request.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.evidently#JsonPath": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.evidently#JsonValue": { + "type": "string", + "traits": { + "smithy.api#mediaType": "application/json" + } + }, + "com.amazonaws.evidently#Launch": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.evidently#LaunchArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the launch.

                                                                      ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#LaunchName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the launch.

                                                                      ", + "smithy.api#required": {} + } + }, + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the launch.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.evidently#LaunchStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the launch.

                                                                      ", + "smithy.api#required": {} + } + }, + "statusReason": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      If the launch was stopped, this is the string that was entered by the person who \n stopped the launch, to explain why it was stopped.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the launch.

                                                                      " + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the launch was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "lastUpdatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the launch was most recently updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "execution": { + "target": "com.amazonaws.evidently#LaunchExecution", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the start and end times of the launch.

                                                                      " + } + }, + "groups": { + "target": "com.amazonaws.evidently#LaunchGroupList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the feature variations that are being used in the launch.

                                                                      " + } + }, + "metricMonitors": { + "target": "com.amazonaws.evidently#MetricMonitorList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the metrics that are being used to monitor the launch performance.

                                                                      " + } + }, + "randomizationSalt": { + "target": "com.amazonaws.evidently#RandomizationSalt", + "traits": { + "smithy.api#documentation": "

                                                                      This value is used when Evidently assigns a particular user session to the launch, to help create a randomization ID\n to determine which variation the user session is served. This randomization ID is a combination of the entity ID\n and randomizationSalt.

                                                                      " + } + }, + "type": { + "target": "com.amazonaws.evidently#LaunchType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of launch.

                                                                      ", + "smithy.api#required": {} + } + }, + "scheduledSplitsDefinition": { + "target": "com.amazonaws.evidently#ScheduledSplitsLaunchDefinition", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the traffic allocation percentages among the feature\n variations during each step of the launch.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with this launch.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure contains the configuration details of one Evidently launch.

                                                                      " + } + }, + "com.amazonaws.evidently#LaunchArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*/launch/[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#LaunchExecution": { + "type": "structure", + "members": { + "startedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the launch started.

                                                                      " + } + }, + "endedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the launch ended.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure contains information about the start and end times of the launch.

                                                                      " + } + }, + "com.amazonaws.evidently#LaunchGroup": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.evidently#GroupName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the launch group.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      A description of the launch group.

                                                                      " + } + }, + "featureVariations": { + "target": "com.amazonaws.evidently#FeatureToVariationMap", + "traits": { + "smithy.api#documentation": "

                                                                      The feature variation for this launch group. This is a key-value pair.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines one launch group in a launch. A launch group is a variation of the feature\n that you are including in the launch.

                                                                      " + } + }, + "com.amazonaws.evidently#LaunchGroupConfig": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.evidently#GroupName", + "traits": { + "smithy.api#documentation": "

                                                                      A name for this launch group.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      A description of the launch group.

                                                                      " + } + }, + "feature": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The feature that this launch is using.

                                                                      ", + "smithy.api#required": {} + } + }, + "variation": { + "target": "com.amazonaws.evidently#VariationName", + "traits": { + "smithy.api#documentation": "

                                                                      The feature variation to use for this launch group.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines one launch group in a launch. A launch group is a variation of\n the feature that you are including in the launch.
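For illustration, one launch group of this shape could be supplied when creating a launch. The group member names (name, description, feature, variation) come from the LaunchGroupConfig shape above; the CreateLaunchCommand input member names (project, name, groups) are not shown in this excerpt and are assumptions, as are all example values:

```ts
import { EvidentlyClient, CreateLaunchCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({});

// One launch group ties a feature variation to a named slice of launch traffic.
const launchGroup = {
  name: "variation-a-group",   // GroupName: 1-127 chars of [-a-zA-Z0-9._]
  description: "Serves variation A of my-feature",
  feature: "my-feature",       // placeholder: feature used by this launch
  variation: "variationA",     // placeholder: variation served to this group
};

await client.send(
  new CreateLaunchCommand({
    project: "my-project",     // assumed input member: name or ARN of the project
    name: "my-launch",         // assumed input member: launch name
    groups: [launchGroup],     // assumed input member: LaunchGroupConfigList
  })
);
```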

                                                                      " + } + }, + "com.amazonaws.evidently#LaunchGroupConfigList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#LaunchGroupConfig" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.evidently#LaunchGroupList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#LaunchGroup" + } + }, + "com.amazonaws.evidently#LaunchName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 127 + }, + "smithy.api#pattern": "[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#LaunchResource": { + "type": "resource", + "identifiers": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef" + }, + "launch": { + "target": "com.amazonaws.evidently#LaunchName" + } + }, + "create": { + "target": "com.amazonaws.evidently#CreateLaunch" + }, + "read": { + "target": "com.amazonaws.evidently#GetLaunch" + }, + "update": { + "target": "com.amazonaws.evidently#UpdateLaunch" + }, + "delete": { + "target": "com.amazonaws.evidently#DeleteLaunch" + }, + "list": { + "target": "com.amazonaws.evidently#ListLaunches" + }, + "operations": [ + { + "target": "com.amazonaws.evidently#StartLaunch" + }, + { + "target": "com.amazonaws.evidently#StopLaunch" + } + ] + }, + "com.amazonaws.evidently#LaunchStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATED", + "name": "CREATED" + }, + { + "value": "UPDATING", + "name": "UPDATING" + }, + { + "value": "RUNNING", + "name": "RUNNING" + }, + { + "value": "COMPLETED", + "name": "COMPLETED" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + } + ] + } + }, + "com.amazonaws.evidently#LaunchStopDesiredState": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "COMPLETED", + "name": "COMPLETED" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + } + ] + } + }, + "com.amazonaws.evidently#LaunchType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "aws.evidently.splits", + "name": "SCHEDULED_SPLITS_LAUNCH" + } + ] + } + }, + "com.amazonaws.evidently#LaunchesList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#Launch" + } + }, + "com.amazonaws.evidently#ListExperiments": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#ListExperimentsRequest" + }, + "output": { + "target": "com.amazonaws.evidently#ListExperimentsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns configuration details about all the experiments in the specified project.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/projects/{project}/experiments", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "experiments", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#ListExperimentsRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project to return the experiment list from.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.evidently#MaxExperiments", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to include in the response.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.evidently#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use when requesting the next set of results. You received this token from a previous \n ListExperiments operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.evidently#ListExperimentsResponse": { + "type": "structure", + "members": { + "experiments": { + "target": "com.amazonaws.evidently#ExperimentList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that contain the configuration details of the experiments in the\n specified project.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.evidently#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use in a subsequent ListExperiments operation to return\n the next set of results.
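A sketch of draining the paginated list with the nextToken/maxResults members described above (package name assumed as before; the project name is a placeholder):

```ts
import { EvidentlyClient, ListExperimentsCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({});

// Page through every experiment in the project, up to 100 per page.
let nextToken: string | undefined;
do {
  const page = await client.send(
    new ListExperimentsCommand({
      project: "my-project", // placeholder: name or ARN
      maxResults: 100,
      nextToken,
    })
  );
  for (const experiment of page.experiments ?? []) {
    console.log(experiment);
  }
  nextToken = page.nextToken;
} while (nextToken);
```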

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#ListFeatures": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#ListFeaturesRequest" + }, + "output": { + "target": "com.amazonaws.evidently#ListFeaturesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns configuration details about all the features in the specified project.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/projects/{project}/features", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "features", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#ListFeaturesRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project to return the feature list from.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.evidently#MaxFeatures", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to include in the response.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.evidently#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use when requesting the next set of results. You received this token from a previous \n ListFeatures operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.evidently#ListFeaturesResponse": { + "type": "structure", + "members": { + "features": { + "target": "com.amazonaws.evidently#FeatureSummariesList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that contain the configuration details of the features in the\n specified project.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.evidently#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use in a subsequent ListFeatures operation to return\n the next set of results.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#ListLaunches": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#ListLaunchesRequest" + }, + "output": { + "target": "com.amazonaws.evidently#ListLaunchesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns configuration details about all the launches in the specified project.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/projects/{project}/launches", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "launches", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#ListLaunchesRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project to return the launch list from.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.evidently#MaxLaunches", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to include in the response.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.evidently#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use when requesting the next set of results. You received this token from a previous \n ListLaunches operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.evidently#ListLaunchesResponse": { + "type": "structure", + "members": { + "launches": { + "target": "com.amazonaws.evidently#LaunchesList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that contain the configuration details of the launches in the\n specified project.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.evidently#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use in a subsequent ListLaunches operation to return\n the next set of results.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#ListProjects": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#ListProjectsRequest" + }, + "output": { + "target": "com.amazonaws.evidently#ListProjectsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns configuration details about all the projects in the current Region in your\n account.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/projects", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "projects", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#ListProjectsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.evidently#MaxProjects", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to include in the response.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + }, + "nextToken": { + "target": "com.amazonaws.evidently#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use when requesting the next set of results. You received this token from a previous \n ListProjects operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.evidently#ListProjectsResponse": { + "type": "structure", + "members": { + "projects": { + "target": "com.amazonaws.evidently#ProjectSummariesList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that contain the configuration details of the projects in the Region.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.evidently#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use in a subsequent ListProjects operation to return\n the next set of results.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.evidently#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Displays the tags associated with an Evidently resource.
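A minimal sketch (package name assumed; the ARN is a placeholder that follows the project ARN pattern defined in this model):

```ts
import { EvidentlyClient, ListTagsForResourceCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({});

// GET /tags/{resourceArn}
const { tags } = await client.send(
  new ListTagsForResourceCommand({
    resourceArn: "arn:aws:evidently:us-west-2:123456789012:project/my-project",
  })
);

console.log(tags); // map of tag keys to tag values
```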

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{resourceArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.evidently#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.evidently#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the resource that you want to see the tags of.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with the resource you specified.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#MaxExperiments": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.evidently#MaxFeatures": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.evidently#MaxLaunches": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.evidently#MaxProjects": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.evidently#MetricDefinition": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.evidently#CwDimensionSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the metric.

                                                                      " + } + }, + "entityIdKey": { + "target": "com.amazonaws.evidently#JsonPath", + "traits": { + "smithy.api#documentation": "

                                                                      The entity, such as a user or session, that does an action that causes a metric\n value to be recorded.

                                                                      " + } + }, + "valueKey": { + "target": "com.amazonaws.evidently#JsonPath", + "traits": { + "smithy.api#documentation": "

                                                                      The value that is tracked to produce the metric.

                                                                      " + } + }, + "eventPattern": { + "target": "com.amazonaws.evidently#JsonValue", + "traits": { + "smithy.api#documentation": "

                                                                      The EventBridge event pattern that defines how the metric is recorded.


                                                                      For more information about EventBridge event patterns, see \n Amazon EventBridge event patterns.

                                                                      " + } + }, + "unitLabel": { + "target": "com.amazonaws.evidently#MetricUnitLabel", + "traits": { + "smithy.api#documentation": "

                                                                      The label for the units that the metric is measuring.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure defines a metric that is being used to evaluate the variations\n during a launch or experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#MetricDefinitionConfig": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.evidently#CwDimensionSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      A name for the metric.

                                                                      " + } + }, + "entityIdKey": { + "target": "com.amazonaws.evidently#JsonPath", + "traits": { + "smithy.api#documentation": "

                                                                      The entity, such as a user or session, that does an action that causes a metric\n value to be recorded. An example is userDetails.userID.

                                                                      " + } + }, + "valueKey": { + "target": "com.amazonaws.evidently#JsonPath", + "traits": { + "smithy.api#documentation": "

                                                                      The value that is tracked to produce the metric.

                                                                      " + } + }, + "eventPattern": { + "target": "com.amazonaws.evidently#JsonValue", + "traits": { + "smithy.api#documentation": "

                                                                      The EventBridge event pattern that defines how the metric is recorded.


                                                                      For more information about EventBridge event patterns, see \n Amazon EventBridge event patterns.

                                                                      ", + "smithy.api#length": { + "max": 1024 + } + } + }, + "unitLabel": { + "target": "com.amazonaws.evidently#MetricUnitLabel", + "traits": { + "smithy.api#documentation": "

                                                                      A label for the units that the metric is measuring.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure defines a metric that you want to use to evaluate the variations\n during a launch or experiment.
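For illustration, a metric definition of this shape might look like the following plain object. The member names follow the MetricDefinitionConfig shape above; the JSON-path keys, unit, and EventBridge-style event pattern are invented example values, and eventPattern is passed as a JSON string because the JsonValue shape carries the application/json media type:

```ts
// Example only: member names from MetricDefinitionConfig, values are placeholders.
const pageLoadTimeMetric = {
  name: "pageLoadTime",              // CloudWatch-dimension-safe metric name
  entityIdKey: "userDetails.userID", // JSON path to the entity that caused the event
  valueKey: "details.pageLoadTime",  // JSON path to the value being tracked
  unitLabel: "Milliseconds",
  eventPattern: JSON.stringify({
    // hypothetical pattern: only match events that include an entityId
    entityId: [{ exists: true }],
  }),
};
```

A definition like this is wrapped by the MetricGoalConfig shape below (which adds a desiredChange direction) for experiments, and by MetricMonitorConfig for launch monitoring.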

                                                                      " + } + }, + "com.amazonaws.evidently#MetricGoal": { + "type": "structure", + "members": { + "metricDefinition": { + "target": "com.amazonaws.evidently#MetricDefinition", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains details about the metric.

                                                                      ", + "smithy.api#required": {} + } + }, + "desiredChange": { + "target": "com.amazonaws.evidently#ChangeDirectionEnum", + "traits": { + "smithy.api#documentation": "

                                                                      \n INCREASE means that a variation with a higher number for this metric is performing \n better.


                                                                      \n DECREASE means that a variation with a lower number for this metric is performing \n better.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that tells Evidently whether higher or lower values are desired for a metric that is \n used in an experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#MetricGoalConfig": { + "type": "structure", + "members": { + "metricDefinition": { + "target": "com.amazonaws.evidently#MetricDefinitionConfig", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains details about the metric.

                                                                      ", + "smithy.api#required": {} + } + }, + "desiredChange": { + "target": "com.amazonaws.evidently#ChangeDirectionEnum", + "traits": { + "smithy.api#documentation": "

                                                                      \n INCREASE means that a variation with a higher number for this metric is performing \n better.


                                                                      \n DECREASE means that a variation with a lower number for this metric is performing \n better.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Use this structure to tell Evidently whether higher or lower values are desired for a metric that is \n used in an experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#MetricGoalConfigList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#MetricGoalConfig" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 3 + } + } + }, + "com.amazonaws.evidently#MetricGoalsList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#MetricGoal" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 3 + } + } + }, + "com.amazonaws.evidently#MetricMonitor": { + "type": "structure", + "members": { + "metricDefinition": { + "target": "com.amazonaws.evidently#MetricDefinition", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines the metric.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines a metric to be used to monitor performance of the variations during a launch.

                                                                      " + } + }, + "com.amazonaws.evidently#MetricMonitorConfig": { + "type": "structure", + "members": { + "metricDefinition": { + "target": "com.amazonaws.evidently#MetricDefinitionConfig", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines the metric.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines a metric to be used to monitor performance of the variations during a launch.

                                                                      " + } + }, + "com.amazonaws.evidently#MetricMonitorConfigList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#MetricMonitorConfig" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 3 + } + } + }, + "com.amazonaws.evidently#MetricMonitorList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#MetricMonitor" + }, + "traits": { + "smithy.api#length": { + "max": 3 + } + } + }, + "com.amazonaws.evidently#MetricNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#CwDimensionSafeName" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1 + } + } + }, + "com.amazonaws.evidently#MetricUnitLabel": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.evidently#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 8192 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.evidently#OnlineAbConfig": { + "type": "structure", + "members": { + "controlTreatmentName": { + "target": "com.amazonaws.evidently#TreatmentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the variation to use as the default variation that the other variations are compared to.

                                                                      " + } + }, + "treatmentWeights": { + "target": "com.amazonaws.evidently#TreatmentToWeightMap", + "traits": { + "smithy.api#documentation": "

                                                                      A set of key-value pairs. The keys are variation names, and the values are the portion\n of experiment traffic to be assigned to that variation. Specify the traffic portion in\n thousandths of a percent, so 20,000 for a variation would allocate 20% of the experiment\n traffic to that variation.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains the configuration of which variation to use as the \"control\"\n version. The \"control\" version is used for comparison with other variations. This structure\n also specifies how much experiment traffic is allocated to each variation.
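To make the thousandths-of-a-percent arithmetic concrete, a sketch of an OnlineAbConfig-shaped object (treatment names are placeholders; 20,000 thousandths of a percent = 20%):

```ts
// Example only: member names from OnlineAbConfig, treatment names are placeholders.
const onlineAbConfig = {
  controlTreatmentName: "control",
  treatmentWeights: {
    control: 50_000,    // 50% of the experiment traffic
    variationA: 30_000, // 30%
    variationB: 20_000, // 20%
  },
};
```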

                                                                      " + } + }, + "com.amazonaws.evidently#OnlineAbDefinition": { + "type": "structure", + "members": { + "controlTreatmentName": { + "target": "com.amazonaws.evidently#TreatmentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the variation that is the default variation, which the other variations are compared to.

                                                                      " + } + }, + "treatmentWeights": { + "target": "com.amazonaws.evidently#TreatmentToWeightMap", + "traits": { + "smithy.api#documentation": "

                                                                      A set of key-value pairs. The keys are variation names, and the values are the portion\n of experiment traffic to be assigned to that variation. The traffic portion is specified in\n thousandths of a percent, so 20,000 for a variation would allocate 20% of the experiment\n traffic to that variation.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains the configuration of which variation to use as the \"control\"\n version. The \"control\" version is used for comparison with other variations. This structure\n also specifies how much experiment traffic is allocated to each variation.

                                                                      " + } + }, + "com.amazonaws.evidently#Project": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.evidently#ProjectArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the project.

                                                                      ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#ProjectName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the project.

                                                                      ", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.evidently#ProjectStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the project.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The user-entered description of the project.

                                                                      " + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the project was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "lastUpdatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the project was most recently updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "featureCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of features currently in the project.

                                                                      " + } + }, + "launchCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of launches currently in the project. This includes all launches that have been created\n and not deleted, whether they are ongoing or not.

                                                                      " + } + }, + "activeLaunchCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of ongoing launches currently in the project.

                                                                      " + } + }, + "experimentCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of experiments currently in the project. This includes all experiments that have been created\n and not deleted, whether they are ongoing or not.

                                                                      " + } + }, + "activeExperimentCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of ongoing experiments currently in the project.

                                                                      " + } + }, + "dataDelivery": { + "target": "com.amazonaws.evidently#ProjectDataDelivery", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about where Evidently is to store\n evaluation events for longer term storage.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with this project.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure defines a project, which is the logical object in Evidently that can contain features, launches, and \n experiments. Use projects to group similar features together.

                                                                      " + } + }, + "com.amazonaws.evidently#ProjectArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#ProjectDataDelivery": { + "type": "structure", + "members": { + "s3Destination": { + "target": "com.amazonaws.evidently#S3Destination", + "traits": { + "smithy.api#documentation": "

                                                                      If the project stores evaluation events in an Amazon S3 bucket, this structure\n stores the bucket name and bucket prefix.

                                                                      " + } + }, + "cloudWatchLogs": { + "target": "com.amazonaws.evidently#CloudWatchLogsDestination", + "traits": { + "smithy.api#documentation": "

                                                                      If the project stores evaluation events in CloudWatch Logs, this structure\n stores the log group name.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about where Evidently is to store\n evaluation events for longer term storage.

                                                                      " + } + }, + "com.amazonaws.evidently#ProjectDataDeliveryConfig": { + "type": "structure", + "members": { + "s3Destination": { + "target": "com.amazonaws.evidently#S3DestinationConfig", + "traits": { + "smithy.api#documentation": "

                                                                      If the project stores evaluation events in an Amazon S3 bucket, this structure\n stores the bucket name and bucket prefix.

                                                                      " + } + }, + "cloudWatchLogs": { + "target": "com.amazonaws.evidently#CloudWatchLogsDestinationConfig", + "traits": { + "smithy.api#documentation": "

                                                                      If the project stores evaluation events in CloudWatch Logs, this structure\n stores the log group name.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about where Evidently is to store\n evaluation events for longer term storage.
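For readers of the generated TypeScript client, a ProjectDataDeliveryConfig is just a plain object carrying either an s3Destination or a cloudWatchLogs member. A minimal sketch, assuming the usual @aws-sdk/client-evidently command naming and that the UpdateProjectDataDelivery operation (referenced by this resource) accepts the destination members at the top level of its request:

```ts
import { EvidentlyClient, UpdateProjectDataDeliveryCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

// Route evaluation events to an S3 bucket for longer-term storage.
// Bucket and prefix must match the S3BucketSafeName / S3PrefixSafeName patterns above.
await client.send(
  new UpdateProjectDataDeliveryCommand({
    project: "ExampleProject",
    s3Destination: { bucket: "my-evidently-events", prefix: "evaluation-events/" },
  })
);
```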

                                                                      " + } + }, + "com.amazonaws.evidently#ProjectName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 127 + }, + "smithy.api#pattern": "[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#ProjectRef": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "([-a-zA-Z0-9._]*)|(arn:[^:]*:[^:]*:[^:]*:[^:]*:project/[-a-zA-Z0-9._]*)" + } + }, + "com.amazonaws.evidently#ProjectResource": { + "type": "resource", + "identifiers": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef" + } + }, + "create": { + "target": "com.amazonaws.evidently#CreateProject" + }, + "read": { + "target": "com.amazonaws.evidently#GetProject" + }, + "delete": { + "target": "com.amazonaws.evidently#DeleteProject" + }, + "list": { + "target": "com.amazonaws.evidently#ListProjects" + }, + "operations": [ + { + "target": "com.amazonaws.evidently#BatchEvaluateFeature" + }, + { + "target": "com.amazonaws.evidently#EvaluateFeature" + }, + { + "target": "com.amazonaws.evidently#PutProjectEvents" + }, + { + "target": "com.amazonaws.evidently#UpdateProject" + }, + { + "target": "com.amazonaws.evidently#UpdateProjectDataDelivery" + } + ], + "resources": [ + { + "target": "com.amazonaws.evidently#ExperimentResource" + }, + { + "target": "com.amazonaws.evidently#FeatureResource" + }, + { + "target": "com.amazonaws.evidently#LaunchResource" + } + ], + "traits": { + "aws.api#arn": { + "template": "{project}", + "absolute": false, + "noAccount": false, + "noRegion": false + } + } + }, + "com.amazonaws.evidently#ProjectStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AVAILABLE", + "name": "AVAILABLE" + }, + { + "value": "UPDATING", + "name": "UPDATING" + } + ] + } + }, + "com.amazonaws.evidently#ProjectSummariesList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#ProjectSummary" + } + }, + "com.amazonaws.evidently#ProjectSummary": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.evidently#ProjectArn", + "traits": { + "smithy.api#documentation": "

The ARN of the project.

                                                                      ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.evidently#ProjectName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the project.

                                                                      ", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.evidently#ProjectStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the project.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the project.

                                                                      " + } + }, + "createdTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the project was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "lastUpdatedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the project was most recently updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "featureCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of features currently in the project.

                                                                      " + } + }, + "launchCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of launches currently in the project, including launches that are ongoing, completed, and not started yet.

                                                                      " + } + }, + "activeLaunchCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of ongoing launches currently in the project.

                                                                      " + } + }, + "experimentCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of experiments currently in the project.

                                                                      " + } + }, + "activeExperimentCount": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

The number of ongoing experiments currently in the project.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with this project.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains configuration information about an Evidently project.

                                                                      " + } + }, + "com.amazonaws.evidently#PutProjectEvents": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#PutProjectEventsRequest" + }, + "output": { + "target": "com.amazonaws.evidently#PutProjectEventsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Sends performance events to Evidently. These events can be used to evaluate a launch or\n an experiment.
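The Smithy example below translates directly to the generated TypeScript client. A minimal sketch (run inside an async context), assuming @aws-sdk/client-evidently exposes PutProjectEventsCommand and that the event data field is passed as a JSON string:

```ts
import { EvidentlyClient, PutProjectEventsCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });

// PutProjectEvents is a data-plane call (note the "dataplane." host prefix in the traits).
const { failedEventCount, eventResults } = await client.send(
  new PutProjectEventsCommand({
    project: "ExampleProject",
    events: [
      {
        timestamp: new Date(),            // smithy.api#Timestamp maps to a JS Date
        type: "aws.evidently.evaluation",
        data: JSON.stringify({ feature: "ExampleFeature", entityId: "user-1" }),
      },
    ],
  })
);
console.log(failedEventCount, eventResults);
```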

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "dataplane." + }, + "smithy.api#examples": [ + { + "title": "Post evaluation Event for Project", + "documentation": "", + "input": { + "project": "ExampleProject", + "events": [ + { + "timestamp": 1627580583, + "type": "aws.evidently.evaluation", + "data": "{\"feature\":\"ExampleFeature\",\"entityId\":\"username@email.com\",\"entityAttributes\":{\"browser\":{\"s\":\"Chrome\"}},\"variation\":\"variationA\",\"type\":\"EXPERIMENT_RULE_MATCH\",\"details\":{\"experiment\":\"Jan2020_landing_page_banner\",\"treatment\":\"control\",\"salt\":\"ADJNC1237ASDNU\"}}" + } + ] + }, + "output": { + "failedEventCount": 0, + "eventResults": [ + { + "eventId": "e55c1f5f-309b-440e-b0d8-64506987c20f", + "errorCode": "null", + "errorMessage": "null" + } + ] + } + } + ], + "smithy.api#http": { + "method": "POST", + "uri": "/events/projects/{project}", + "code": 200 + }, + "smithy.api#tags": ["dataplane"] + } + }, + "com.amazonaws.evidently#PutProjectEventsRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project to write the events to.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "events": { + "target": "com.amazonaws.evidently#EventList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of event structures that contain the performance data that is being sent to\n Evidently.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#PutProjectEventsResponse": { + "type": "structure", + "members": { + "failedEventCount": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

                                                                      The number of events in the operation that could not be used by Evidently.

                                                                      " + } + }, + "eventResults": { + "target": "com.amazonaws.evidently#PutProjectEventsResultEntryList", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains Evidently's response to the sent events, including an event ID and \n error codes, if any.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#PutProjectEventsResultEntry": { + "type": "structure", + "members": { + "eventId": { + "target": "com.amazonaws.evidently#Uuid", + "traits": { + "smithy.api#documentation": "

                                                                      A unique ID assigned to this PutProjectEvents operation.

                                                                      " + } + }, + "errorCode": { + "target": "com.amazonaws.evidently#ErrorCodeEnum", + "traits": { + "smithy.api#documentation": "

                                                                      If the PutProjectEvents operation has an error, the error code is returned\n here.

                                                                      " + } + }, + "errorMessage": { + "target": "com.amazonaws.evidently#ErrorMessage", + "traits": { + "smithy.api#documentation": "

                                                                      If the PutProjectEvents operation has an error, the error message is\n returned here.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains Evidently's response to the sent events, including an event ID and error codes, if any.

                                                                      " + } + }, + "com.amazonaws.evidently#PutProjectEventsResultEntryList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#PutProjectEventsResultEntry" + } + }, + "com.amazonaws.evidently#RandomizationSalt": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 127 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.evidently#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the resource that caused the exception.

                                                                      " + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the resource that is associated with the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request references a resource that does not exist.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.evidently#ResultsPeriod": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 300, + "max": 90000 + } + } + }, + "com.amazonaws.evidently#RuleName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.evidently#RuleType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.evidently#S3BucketSafeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 3, + "max": 63 + }, + "smithy.api#pattern": "^[a-z0-9][-a-z0-9]*[a-z0-9]$" + } + }, + "com.amazonaws.evidently#S3Destination": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.evidently#S3BucketSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the bucket in which Evidently stores evaluation events.

                                                                      " + } + }, + "prefix": { + "target": "com.amazonaws.evidently#S3PrefixSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The bucket prefix in which Evidently stores evaluation events.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      If the project stores evaluation events in an Amazon S3 bucket, this structure\n stores the bucket name and bucket prefix.

                                                                      " + } + }, + "com.amazonaws.evidently#S3DestinationConfig": { + "type": "structure", + "members": { + "bucket": { + "target": "com.amazonaws.evidently#S3BucketSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the bucket in which Evidently stores evaluation events.

                                                                      " + } + }, + "prefix": { + "target": "com.amazonaws.evidently#S3PrefixSafeName", + "traits": { + "smithy.api#documentation": "

                                                                      The bucket prefix in which Evidently stores evaluation events.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      If the project stores evaluation events in an Amazon S3 bucket, this structure\n stores the bucket name and bucket prefix.

                                                                      " + } + }, + "com.amazonaws.evidently#S3PrefixSafeName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^[-a-zA-Z0-9!_.*'()/]*$" + } + }, + "com.amazonaws.evidently#ScheduledSplit": { + "type": "structure", + "members": { + "startTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that this step of the launch starts.

                                                                      ", + "smithy.api#required": {} + } + }, + "groupWeights": { + "target": "com.amazonaws.evidently#GroupToWeightMap", + "traits": { + "smithy.api#documentation": "

                                                                      The traffic allocation percentages among the feature variations during one step of a\n launch. This is a set of key-value pairs. The keys are variation names. The values represent\n the percentage of traffic to allocate to that variation during this step.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure defines the traffic allocation percentages among the feature\n variations during one step of a launch, and the start time of that step.

                                                                      " + } + }, + "com.amazonaws.evidently#ScheduledSplitConfig": { + "type": "structure", + "members": { + "startTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that this step of the launch starts.

                                                                      ", + "smithy.api#required": {} + } + }, + "groupWeights": { + "target": "com.amazonaws.evidently#GroupToWeightMap", + "traits": { + "smithy.api#documentation": "

                                                                      The traffic allocation percentages among the feature variations during one step of a\n launch. This is a set of key-value pairs. The keys are variation names. The values represent\n the percentage of traffic to allocate to that variation during this step.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure defines the traffic allocation percentages among the feature\n variations during one step of a launch, and the start time of that step.

                                                                      " + } + }, + "com.amazonaws.evidently#ScheduledSplitConfigList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#ScheduledSplitConfig" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 6 + } + } + }, + "com.amazonaws.evidently#ScheduledSplitsLaunchConfig": { + "type": "structure", + "members": { + "steps": { + "target": "com.amazonaws.evidently#ScheduledSplitConfigList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the traffic allocation percentages among the feature\n variations during each step of the launch. This also defines the start time of each\n step.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the traffic allocation percentages among the feature\n variations during each step of a launch. This also defines the start time of each step.
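In the TypeScript client this configuration is a plain nested object; the weights use the SplitWeight scale defined below (thousandths of a percent, 0-100000). A sketch of a two-step ramp, with placeholder group names:

```ts
// ScheduledSplitsLaunchConfig: ramp a launch from a 10%/90% split to 50%/50%.
const scheduledSplitsConfig = {
  steps: [
    {
      startTime: new Date("2021-12-01T00:00:00Z"),
      groupWeights: { Variation1: 10_000, Variation2: 90_000 }, // thousandths of a percent
    },
    {
      startTime: new Date("2021-12-08T00:00:00Z"),
      groupWeights: { Variation1: 50_000, Variation2: 50_000 },
    },
  ],
};
```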

                                                                      " + } + }, + "com.amazonaws.evidently#ScheduledSplitsLaunchDefinition": { + "type": "structure", + "members": { + "steps": { + "target": "com.amazonaws.evidently#ScheduledStepList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the traffic allocation percentages among the feature\n variations during each step of the launch. This also defines the start time of each\n step.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the traffic allocation percentages among the feature\n variations during each step of a launch. This also defines the start time of each step.

                                                                      " + } + }, + "com.amazonaws.evidently#ScheduledStepList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#ScheduledSplit" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 6 + } + } + }, + "com.amazonaws.evidently#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the resource that caused the exception.

                                                                      " + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the resource that is associated with the error.

                                                                      " + } + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the service that is associated with the error.

                                                                      " + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the service quota that was exceeded.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request would cause a service quota to be exceeded.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.evidently#ServiceUnavailableException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The service was unavailable. Retry the request.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 503 + } + }, + "com.amazonaws.evidently#SplitWeight": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 100000 + } + } + }, + "com.amazonaws.evidently#StartExperiment": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#StartExperimentRequest" + }, + "output": { + "target": "com.amazonaws.evidently#StartExperimentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Starts an existing experiment. To create an experiment, \n use CreateExperiment.
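A minimal sketch with the generated client (command class name assumed to follow the usual pattern); note that analysisCompleteTime is required by this model:

```ts
import { EvidentlyClient, StartExperimentCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });
const { startedTime } = await client.send(
  new StartExperimentCommand({
    project: "ExampleProject",
    experiment: "ExampleExperiment",
    analysisCompleteTime: new Date("2022-01-15T00:00:00Z"), // when the experiment should end
  })
);
console.log(startedTime);
```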

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/experiments/{experiment}/start", + "code": 200 + } + } + }, + "com.amazonaws.evidently#StartExperimentRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the experiment to start.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "experiment": { + "target": "com.amazonaws.evidently#ExperimentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the experiment to start.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "analysisCompleteTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time to end the experiment.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#StartExperimentResponse": { + "type": "structure", + "members": { + "startedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      A timestamp that indicates when the experiment started.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#StartLaunch": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#StartLaunchRequest" + }, + "output": { + "target": "com.amazonaws.evidently#StartLaunchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Starts an existing launch. To create a launch, \n use CreateLaunch.
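The corresponding call in the TypeScript client, as a sketch:

```ts
import { EvidentlyClient, StartLaunchCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });
const { launch } = await client.send(
  new StartLaunchCommand({ project: "ExampleProject", launch: "ExampleLaunch" })
);
console.log(launch?.status);
```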

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/launches/{launch}/start", + "code": 200 + } + } + }, + "com.amazonaws.evidently#StartLaunchRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the launch to start.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "launch": { + "target": "com.amazonaws.evidently#LaunchName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the launch to start.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#StartLaunchResponse": { + "type": "structure", + "members": { + "launch": { + "target": "com.amazonaws.evidently#Launch", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the launch that was started.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#StopExperiment": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#StopExperimentRequest" + }, + "output": { + "target": "com.amazonaws.evidently#StopExperimentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Stops an experiment that is currently running. If you stop an experiment, you can't\n resume it or restart it.
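A sketch of stopping an experiment and recording why; desiredState controls whether the stopped experiment is recorded as COMPLETED or CANCELLED:

```ts
import { EvidentlyClient, StopExperimentCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });
await client.send(
  new StopExperimentCommand({
    project: "ExampleProject",
    experiment: "ExampleExperiment",
    desiredState: "CANCELLED",            // or "COMPLETED"
    reason: "Stopping early: metric goal already reached",
  })
);
```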

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/experiments/{experiment}/cancel", + "code": 200 + } + } + }, + "com.amazonaws.evidently#StopExperimentRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the experiment to stop.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "experiment": { + "target": "com.amazonaws.evidently#ExperimentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the experiment to stop.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "desiredState": { + "target": "com.amazonaws.evidently#ExperimentStopDesiredState", + "traits": { + "smithy.api#documentation": "

                                                                      Specify whether the experiment is to be considered COMPLETED or \n CANCELLED after it stops.

                                                                      " + } + }, + "reason": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      A string that describes why you are stopping the experiment.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#StopExperimentResponse": { + "type": "structure", + "members": { + "endedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the experiment stopped.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#StopLaunch": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#StopLaunchRequest" + }, + "output": { + "target": "com.amazonaws.evidently#StopLaunchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ThrottlingException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Stops a launch that is currently running. After you stop a launch, you will not be able to resume it or restart it. \n Also, it \n will not be evaluated as a rule for traffic allocation, and the traffic that was allocated to the launch\n will instead be available to the feature's experiment, if there is one. Otherwise, all traffic\n will be served the default variation after the launch is stopped.
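And the launch equivalent, again as a sketch with an assumed command class name:

```ts
import { EvidentlyClient, StopLaunchCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });
const { endedTime } = await client.send(
  new StopLaunchCommand({
    project: "ExampleProject",
    launch: "ExampleLaunch",
    desiredState: "COMPLETED",
    reason: "Rollout finished",
  })
);
console.log(endedTime);
```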

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/projects/{project}/launches/{launch}/cancel", + "code": 200 + } + } + }, + "com.amazonaws.evidently#StopLaunchRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the launch that you want to stop.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "launch": { + "target": "com.amazonaws.evidently#LaunchName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the launch to stop.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "desiredState": { + "target": "com.amazonaws.evidently#LaunchStopDesiredState", + "traits": { + "smithy.api#documentation": "

                                                                      Specify whether to consider the launch as COMPLETED or\n CANCELLED after it stops.

                                                                      " + } + }, + "reason": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      A string that describes why you are stopping the launch.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#StopLaunchResponse": { + "type": "structure", + "members": { + "endedTime": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the launch stopped.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(?!aws:)[a-zA-Z+-=._:/]+$" + } + }, + "com.amazonaws.evidently#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.evidently#TagMap": { + "type": "map", + "key": { + "target": "com.amazonaws.evidently#TagKey" + }, + "value": { + "target": "com.amazonaws.evidently#TagValue" + } + }, + "com.amazonaws.evidently#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.evidently#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Assigns one or more tags (key-value pairs) to the specified CloudWatch Evidently resource. Projects,\n features, launches, and experiments can be tagged.

                                                                      \n

                                                                      Tags can help you organize and categorize your resources. You can also use them to scope user\n permissions by granting a user\n permission to access or change only resources with certain tag values.

                                                                      \n

                                                                      Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

                                                                      \n

You can use the TagResource action with a resource that already has tags. \n If you specify a new tag key for the resource, \n this tag is appended to the list of tags associated\n with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces\n the previous value for that tag.

                                                                      \n

                                                                      You can associate as many as 50 tags with a resource.

                                                                      \n

                                                                      For more information, see Tagging Amazon Web Services resources.
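A sketch of tagging an Evidently project with the generated client:

```ts
import { EvidentlyClient, TagResourceCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });
await client.send(
  new TagResourceCommand({
    // Any taggable Evidently ARN works here: project, feature, launch, or experiment.
    resourceArn: "arn:aws:evidently:us-west-2:123456789012:project/ExampleProject",
    tags: { team: "growth", stage: "prod" },
  })
);
```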

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/tags/{resourceArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.evidently#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.evidently#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the CloudWatch Evidently resource that you're adding tags to.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.evidently#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of key-value pairs to associate with the resource.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#TagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.evidently#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.evidently#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String" + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the service that is associated with the error.

                                                                      " + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the service quota that was exceeded.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request was denied because of request throttling. Retry the request.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.evidently#TimestampList": { + "type": "list", + "member": { + "target": "smithy.api#Timestamp" + }, + "traits": { + "smithy.api#length": { + "max": 100800 + } + } + }, + "com.amazonaws.evidently#Treatment": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.evidently#TreatmentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of this treatment.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the treatment.

                                                                      " + } + }, + "featureVariations": { + "target": "com.amazonaws.evidently#FeatureToVariationMap", + "traits": { + "smithy.api#documentation": "

                                                                      The feature variation used for this treatment. This is a key-value pair. The key is the\n feature name, and the value is the variation name.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines one treatment in an experiment. A treatment is a variation of the feature\n that you are including in the experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#TreatmentConfig": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.evidently#TreatmentName", + "traits": { + "smithy.api#documentation": "

                                                                      A name for this treatment.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      A description for this treatment.

                                                                      " + } + }, + "feature": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The feature that this experiment is testing.

                                                                      ", + "smithy.api#required": {} + } + }, + "variation": { + "target": "com.amazonaws.evidently#VariationName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the variation to use as this treatment in the experiment.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines one treatment in an experiment. A treatment is a variation of the feature\n that you are including in the experiment.

                                                                      " + } + }, + "com.amazonaws.evidently#TreatmentConfigList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#TreatmentConfig" + }, + "traits": { + "smithy.api#length": { + "max": 5 + } + } + }, + "com.amazonaws.evidently#TreatmentList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#Treatment" + }, + "traits": { + "smithy.api#length": { + "min": 2, + "max": 5 + } + } + }, + "com.amazonaws.evidently#TreatmentName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 127 + }, + "smithy.api#pattern": "[-a-zA-Z0-9._]*" + } + }, + "com.amazonaws.evidently#TreatmentNameList": { + "type": "list", + "member": { + "target": "com.amazonaws.evidently#TreatmentName" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.evidently#TreatmentToWeightMap": { + "type": "map", + "key": { + "target": "com.amazonaws.evidently#TreatmentName" + }, + "value": { + "target": "com.amazonaws.evidently#SplitWeight" + } + }, + "com.amazonaws.evidently#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.evidently#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Removes one or more tags from the specified resource.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{resourceArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.evidently#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.evidently#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the CloudWatch Evidently resource that you're removing tags from.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.evidently#TagKeyList", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys to remove from the resource.

                                                                      ", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.evidently#UpdateExperiment": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#UpdateExperimentRequest" + }, + "output": { + "target": "com.amazonaws.evidently#UpdateExperimentResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates an Evidently experiment.

                                                                      \n

Don't use this operation to update an experiment's tags. Instead, use \n TagResource.
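A sketch of adjusting an experiment's sampling rate; samplingRate uses the SplitWeight scale, so 20000 allocates 20% of the available audience:

```ts
import { EvidentlyClient, UpdateExperimentCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });
await client.send(
  new UpdateExperimentCommand({
    project: "ExampleProject",
    experiment: "ExampleExperiment",
    description: "Ramp the experiment to a larger share of traffic",
    samplingRate: 20_000, // 20% of the available audience, in thousandths of a percent
  })
);
```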

                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/projects/{project}/experiments/{experiment}", + "code": 200 + } + } + }, + "com.amazonaws.evidently#UpdateExperimentRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the experiment that you want to update.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "experiment": { + "target": "com.amazonaws.evidently#ExperimentName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the experiment to update.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      An optional description of the experiment.

                                                                      " + } + }, + "treatments": { + "target": "com.amazonaws.evidently#TreatmentConfigList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that define the variations being tested in the experiment.

                                                                      " + } + }, + "metricGoals": { + "target": "com.amazonaws.evidently#MetricGoalConfigList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that defines the metrics used for the experiment, and whether a higher\n or lower value for each metric is the goal.

                                                                      " + } + }, + "randomizationSalt": { + "target": "com.amazonaws.evidently#RandomizationSalt", + "traits": { + "smithy.api#documentation": "

                                                                      When Evidently assigns a particular user session to an experiment, it must use a randomization ID\n to determine which variation the user session is served. This randomization ID is a combination of the entity ID\n and randomizationSalt. If you omit randomizationSalt, Evidently uses\n the experiment name as the randomizationSalt.

                                                                      " + } + }, + "samplingRate": { + "target": "com.amazonaws.evidently#SplitWeight", + "traits": { + "smithy.api#box": {}, + "smithy.api#documentation": "

                                                                      The portion of the available audience that you want to allocate to this experiment, in thousandths of a percent. The available audience\n is the total audience minus the audience that you have allocated to overrides or current launches of\n this feature.

                                                                      \n

                                                                      This is represented in thousandths of a percent. For example, specify 20,000 to allocate 20% of the available audience.

                                                                      " + } + }, + "onlineAbConfig": { + "target": "com.amazonaws.evidently#OnlineAbConfig", + "traits": { + "smithy.api#documentation": "

A structure that contains the configuration of which variation to use as the \"control\"\n version. The \"control\" version is used for comparison with other variations. This structure\n also specifies how much experiment traffic is allocated to each variation.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#UpdateExperimentResponse": { + "type": "structure", + "members": { + "experiment": { + "target": "com.amazonaws.evidently#Experiment", + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing the configuration details of the experiment\n that was updated.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#UpdateFeature": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#UpdateFeatureRequest" + }, + "output": { + "target": "com.amazonaws.evidently#UpdateFeatureResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates an existing feature.

                                                                      \n

                                                                      You can't use this operation to update the tags of an existing feature. Instead, use \n TagResource.
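A sketch of updating a feature's variations and pinning a specific user to one of them; the shape of each variation's value assumes the string variant of the variable-value union:

```ts
import { EvidentlyClient, UpdateFeatureCommand } from "@aws-sdk/client-evidently";

const client = new EvidentlyClient({ region: "us-west-2" });
await client.send(
  new UpdateFeatureCommand({
    project: "ExampleProject",
    feature: "ExampleFeature",
    // Existing variation names are updated; new names are added.
    addOrUpdateVariations: [
      { name: "variationA", value: { stringValue: "blue-button" } },
      { name: "variationB", value: { stringValue: "green-button" } },
    ],
    defaultVariation: "variationA",
    // Always serve variationB to this internal test user.
    entityOverrides: { "test-user-1": "variationB" },
  })
);
```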

                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/projects/{project}/features/{feature}", + "code": 200 + } + } + }, + "com.amazonaws.evidently#UpdateFeatureRequest": { + "type": "structure", + "members": { + "project": { + "target": "com.amazonaws.evidently#ProjectRef", + "traits": { + "smithy.api#documentation": "

                                                                      The name or ARN of the project that contains the feature to be updated.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "feature": { + "target": "com.amazonaws.evidently#FeatureName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the feature to be updated.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "evaluationStrategy": { + "target": "com.amazonaws.evidently#FeatureEvaluationStrategy", + "traits": { + "smithy.api#documentation": "

                                                                      Specify ALL_RULES to activate the traffic allocation specified by any ongoing\n launches or experiments. Specify DEFAULT_VARIATION to serve the default variation\n to all users instead.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.evidently#Description", + "traits": { + "smithy.api#documentation": "

                                                                      An optional description of the feature.

                                                                      " + } + }, + "addOrUpdateVariations": { + "target": "com.amazonaws.evidently#VariationConfigsList", + "traits": { + "smithy.api#documentation": "

                                                                      To update variation configurations for this feature, or add new ones, specify this structure.\n In this array, include any variations that you want to add or update. If the array includes a variation name that\n already exists for this feature, it is updated. If it includes a new variation name, it is added\n as a new variation.

                                                                      " + } + }, + "removeVariations": { + "target": "com.amazonaws.evidently#VariationNameList", + "traits": { + "smithy.api#documentation": "

                                                                      Removes a variation from the feature. If the variation you specify doesn't exist, then this \n makes no change and does not report an error.

                                                                      \n

                                                                      This operation fails if you try to remove a variation that is part of an \n ongoing launch or experiment.

                                                                      " + } + }, + "defaultVariation": { + "target": "com.amazonaws.evidently#VariationName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the variation to use as the default variation. The default\n variation is served to users who are not allocated to any ongoing launches\n or experiments of this feature.

                                                                      " + } + }, + "entityOverrides": { + "target": "com.amazonaws.evidently#EntityOverrideMap", + "traits": { + "smithy.api#documentation": "

                                                                      Specified users that should always be served a specific variation of a feature. Each user\n is specified by a key-value pair . For each key, specify a user by entering their user ID,\n account ID, or some other identifier. For the value, specify the name of the variation that\n they are to be served.

                                                                      " + } + } + } + }, + "com.amazonaws.evidently#UpdateFeatureResponse": { + "type": "structure", + "members": { + "feature": { + "target": "com.amazonaws.evidently#Feature", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the updated feature.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.evidently#UpdateLaunch": { + "type": "operation", + "input": { + "target": "com.amazonaws.evidently#UpdateLaunchRequest" + }, + "output": { + "target": "com.amazonaws.evidently#UpdateLaunchResponse" + }, + "errors": [ + { + "target": "com.amazonaws.evidently#AccessDeniedException" + }, + { + "target": "com.amazonaws.evidently#ConflictException" + }, + { + "target": "com.amazonaws.evidently#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.evidently#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

+    "com.amazonaws.evidently#UpdateLaunch": {
+      "type": "operation",
+      "input": { "target": "com.amazonaws.evidently#UpdateLaunchRequest" },
+      "output": { "target": "com.amazonaws.evidently#UpdateLaunchResponse" },
+      "errors": [
+        { "target": "com.amazonaws.evidently#AccessDeniedException" },
+        { "target": "com.amazonaws.evidently#ConflictException" },
+        { "target": "com.amazonaws.evidently#ResourceNotFoundException" },
+        { "target": "com.amazonaws.evidently#ValidationException" }
+      ],
+      "traits": {
+        "smithy.api#documentation": "Updates a launch of a given feature.\n Don't use this operation to update the tags of an existing launch. Instead, use \n TagResource.",
+        "smithy.api#http": { "method": "PATCH", "uri": "/projects/{project}/launches/{launch}", "code": 200 }
+      }
+    },
+    "com.amazonaws.evidently#UpdateLaunchRequest": {
+      "type": "structure",
+      "members": {
+        "project": { "target": "com.amazonaws.evidently#ProjectRef", "traits": { "smithy.api#documentation": "The name or ARN of the project that contains the launch that you want to update.", "smithy.api#httpLabel": {}, "smithy.api#required": {} } },
+        "launch": { "target": "com.amazonaws.evidently#LaunchName", "traits": { "smithy.api#documentation": "The name of the launch that is to be updated.", "smithy.api#httpLabel": {}, "smithy.api#required": {} } },
+        "description": { "target": "com.amazonaws.evidently#Description", "traits": { "smithy.api#documentation": "An optional description for the launch." } },
+        "groups": { "target": "com.amazonaws.evidently#LaunchGroupConfigList", "traits": { "smithy.api#documentation": "An array of structures that contains the feature and variations that are to be used for\n the launch." } },
+        "metricMonitors": { "target": "com.amazonaws.evidently#MetricMonitorConfigList", "traits": { "smithy.api#documentation": "An array of structures that define the metrics that will be used to monitor \n the launch performance." } },
+        "randomizationSalt": { "target": "com.amazonaws.evidently#RandomizationSalt", "traits": { "smithy.api#documentation": "When Evidently assigns a particular user session to a launch, it must use a randomization ID\n to determine which variation the user session is served. This randomization ID is a combination of the entity ID\n and randomizationSalt. If you omit randomizationSalt, Evidently uses\n the launch name as the randomizationSalt." } },
+        "scheduledSplitsConfig": { "target": "com.amazonaws.evidently#ScheduledSplitsLaunchConfig", "traits": { "smithy.api#documentation": "An array of structures that define the traffic allocation percentages among the feature\n variations during each step of the launch." } }
+      }
+    },
+    "com.amazonaws.evidently#UpdateLaunchResponse": {
+      "type": "structure",
+      "members": {
+        "launch": { "target": "com.amazonaws.evidently#Launch", "traits": { "smithy.api#documentation": "A structure that contains the new configuration of the launch that was updated.", "smithy.api#required": {} } }
+      }
+    },

+    "com.amazonaws.evidently#UpdateProject": {
+      "type": "operation",
+      "input": { "target": "com.amazonaws.evidently#UpdateProjectRequest" },
+      "output": { "target": "com.amazonaws.evidently#UpdateProjectResponse" },
+      "errors": [
+        { "target": "com.amazonaws.evidently#AccessDeniedException" },
+        { "target": "com.amazonaws.evidently#ResourceNotFoundException" },
+        { "target": "com.amazonaws.evidently#ServiceQuotaExceededException" },
+        { "target": "com.amazonaws.evidently#ValidationException" }
+      ],
+      "traits": {
+        "smithy.api#documentation": "Updates the description of an existing project.\n To create a new project, use CreateProject.\n Don't use this operation to update the data storage options of a project. Instead, use \n UpdateProjectDataDelivery.\n Don't use this operation to update the tags of a project. Instead, use \n TagResource.",
+        "smithy.api#http": { "method": "PATCH", "uri": "/projects/{project}", "code": 200 }
+      }
+    },
+    "com.amazonaws.evidently#UpdateProjectDataDelivery": {
+      "type": "operation",
+      "input": { "target": "com.amazonaws.evidently#UpdateProjectDataDeliveryRequest" },
+      "output": { "target": "com.amazonaws.evidently#UpdateProjectDataDeliveryResponse" },
+      "errors": [
+        { "target": "com.amazonaws.evidently#AccessDeniedException" },
+        { "target": "com.amazonaws.evidently#ConflictException" },
+        { "target": "com.amazonaws.evidently#ResourceNotFoundException" },
+        { "target": "com.amazonaws.evidently#ServiceQuotaExceededException" },
+        { "target": "com.amazonaws.evidently#ValidationException" }
+      ],
+      "traits": {
+        "smithy.api#documentation": "Updates the data storage options for this project. If you store evaluation events, you can\n keep them and analyze them on your own. If you choose not to store evaluation events,\n Evidently deletes them after using them to produce metrics and other experiment results that\n you can view.\n You can't specify both cloudWatchLogs and s3Destination in the same operation.",
+        "smithy.api#http": { "method": "PATCH", "uri": "/projects/{project}/data-delivery", "code": 200 }
+      }
+    },
+    "com.amazonaws.evidently#UpdateProjectDataDeliveryRequest": {
+      "type": "structure",
+      "members": {
+        "project": { "target": "com.amazonaws.evidently#ProjectRef", "traits": { "smithy.api#documentation": "The name or ARN of the project that you want to modify the data storage options for.", "smithy.api#httpLabel": {}, "smithy.api#required": {} } },
+        "s3Destination": { "target": "com.amazonaws.evidently#S3DestinationConfig", "traits": { "smithy.api#documentation": "A structure containing the S3 bucket name and bucket prefix where you want to store evaluation events." } },
+        "cloudWatchLogs": { "target": "com.amazonaws.evidently#CloudWatchLogsDestinationConfig", "traits": { "smithy.api#documentation": "A structure containing the CloudWatch Logs log group where you want to store evaluation\n events." } }
+      }
+    },
+    "com.amazonaws.evidently#UpdateProjectDataDeliveryResponse": {
+      "type": "structure",
+      "members": {
+        "project": { "target": "com.amazonaws.evidently#Project", "traits": { "smithy.api#documentation": "A structure containing details about the project that you updated.", "smithy.api#required": {} } }
+      }
+    },

+    "com.amazonaws.evidently#UpdateProjectRequest": {
+      "type": "structure",
+      "members": {
+        "project": { "target": "com.amazonaws.evidently#ProjectRef", "traits": { "smithy.api#documentation": "The name or ARN of the project to update.", "smithy.api#httpLabel": {}, "smithy.api#required": {} } },
+        "description": { "target": "com.amazonaws.evidently#Description", "traits": { "smithy.api#documentation": "An optional description of the project." } }
+      }
+    },
+    "com.amazonaws.evidently#UpdateProjectResponse": {
+      "type": "structure",
+      "members": {
+        "project": { "target": "com.amazonaws.evidently#Project", "traits": { "smithy.api#documentation": "A structure containing information about the updated project.", "smithy.api#required": {} } }
+      }
+    },
+    "com.amazonaws.evidently#Uuid": {
+      "type": "string",
+      "traits": {
+        "smithy.api#length": { "min": 36, "max": 36 },
+        "smithy.api#pattern": "[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{12}"
+      }
+    },
+    "com.amazonaws.evidently#ValidationException": {
+      "type": "structure",
+      "members": {
+        "message": { "target": "smithy.api#String" },
+        "reason": { "target": "com.amazonaws.evidently#ValidationExceptionReason", "traits": { "smithy.api#documentation": "A reason for the error." } },
+        "fieldList": { "target": "com.amazonaws.evidently#ValidationExceptionFieldList", "traits": { "smithy.api#documentation": "The parameter that caused the exception." } }
+      },
+      "traits": {
+        "smithy.api#documentation": "The value of a parameter in the request caused an error.",
+        "smithy.api#error": "client",
+        "smithy.api#httpError": 400
+      }
+    },
+    "com.amazonaws.evidently#ValidationExceptionField": {
+      "type": "structure",
+      "members": {
+        "name": { "target": "smithy.api#String", "traits": { "smithy.api#documentation": "The error name.", "smithy.api#required": {} } },
+        "message": { "target": "smithy.api#String", "traits": { "smithy.api#documentation": "The error message.", "smithy.api#required": {} } }
+      },
+      "traits": { "smithy.api#documentation": "A structure containing an error name and message." }
+    },
+    "com.amazonaws.evidently#ValidationExceptionFieldList": {
+      "type": "list",
+      "member": { "target": "com.amazonaws.evidently#ValidationExceptionField" }
+    },
+    "com.amazonaws.evidently#ValidationExceptionReason": {
+      "type": "string",
+      "traits": {
+        "smithy.api#enum": [
+          { "value": "unknownOperation", "name": "UNKNOWN_OPERATION" },
+          { "value": "cannotParse", "name": "CANNOT_PARSE" },
+          { "value": "fieldValidationFailed", "name": "FIELD_VALIDATION_FAILED" },
+          { "value": "other", "name": "OTHER" }
+        ]
+      }
+    },

+    "com.amazonaws.evidently#VariableValue": {
+      "type": "union",
+      "members": {
+        "boolValue": { "target": "smithy.api#Boolean", "traits": { "smithy.api#documentation": "If this feature uses the Boolean variation type, this field contains the Boolean value of\n this variation." } },
+        "stringValue": { "target": "smithy.api#String", "traits": { "smithy.api#documentation": "If this feature uses the string variation type, this field contains the string value of\n this variation.", "smithy.api#length": { "max": 512 } } },
+        "longValue": { "target": "smithy.api#Long", "traits": { "smithy.api#documentation": "If this feature uses the long variation type, this field contains the long value of\n this variation.", "smithy.api#range": { "min": -9007199254740991, "max": 9007199254740991 } } },
+        "doubleValue": { "target": "smithy.api#Double", "traits": { "smithy.api#documentation": "If this feature uses the double integer variation type, this field contains the double integer value of\n this variation." } }
+      },
+      "traits": {
+        "smithy.api#documentation": "The value assigned to a feature variation. This structure must contain exactly one\n field. It can be boolValue, doubleValue, longValue, or\n stringValue."
+      }
+    },

+    "com.amazonaws.evidently#Variation": {
+      "type": "structure",
+      "members": {
+        "name": { "target": "com.amazonaws.evidently#VariationName", "traits": { "smithy.api#documentation": "The name of the variation." } },
+        "value": { "target": "com.amazonaws.evidently#VariableValue", "traits": { "smithy.api#documentation": "The value assigned to this variation." } }
+      },
+      "traits": { "smithy.api#documentation": "This structure contains the name and variation value of one variation of a feature." }
+    },
+    "com.amazonaws.evidently#VariationConfig": {
+      "type": "structure",
+      "members": {
+        "name": { "target": "com.amazonaws.evidently#VariationName", "traits": { "smithy.api#documentation": "The name of the variation.", "smithy.api#required": {} } },
+        "value": { "target": "com.amazonaws.evidently#VariableValue", "traits": { "smithy.api#documentation": "The value assigned to this variation.", "smithy.api#required": {} } }
+      },
+      "traits": { "smithy.api#documentation": "This structure contains the name and variation value of one variation of a feature." }
+    },
+    "com.amazonaws.evidently#VariationConfigsList": {
+      "type": "list",
+      "member": { "target": "com.amazonaws.evidently#VariationConfig" },
+      "traits": { "smithy.api#length": { "min": 1, "max": 5 } }
+    },
+    "com.amazonaws.evidently#VariationName": {
+      "type": "string",
+      "traits": {
+        "smithy.api#length": { "min": 1, "max": 127 },
+        "smithy.api#pattern": "[-a-zA-Z0-9._]*"
+      }
+    },
+    "com.amazonaws.evidently#VariationNameList": {
+      "type": "list",
+      "member": { "target": "com.amazonaws.evidently#VariationName" },
+      "traits": { "smithy.api#length": { "max": 5 } }
+    },
+    "com.amazonaws.evidently#VariationValueType": {
+      "type": "string",
+      "traits": {
+        "smithy.api#enum": [
+          { "value": "STRING", "name": "STRING" },
+          { "value": "LONG", "name": "LONG" },
+          { "value": "DOUBLE", "name": "DOUBLE" },
+          { "value": "BOOLEAN", "name": "BOOLEAN" }
+        ]
+      }
+    },
+    "com.amazonaws.evidently#VariationsList": {
+      "type": "list",
+      "member": { "target": "com.amazonaws.evidently#Variation" }
+    }
+  }
+}
diff --git a/codegen/sdk-codegen/aws-models/fsx.json b/codegen/sdk-codegen/aws-models/fsx.json
index 4a741aa25e6f..7eea71e51b4e 100644
--- a/codegen/sdk-codegen/aws-models/fsx.json
+++ b/codegen/sdk-codegen/aws-models/fsx.json
@@ -42,6 +42,21 @@
     },
     "com.amazonaws.fsx#AWSSimbaAPIService_v20180301": {
       "type": "service",
+      "traits": {
+        "aws.api#service": {
+          "sdkId": "FSx",
+          "arnNamespace": "fsx",
+          "cloudFormationName": "FSx",
+          "cloudTrailEventSource": "fsx.amazonaws.com",
+          "endpointPrefix": "fsx"
+        },
+        "aws.auth#sigv4": { "name": "fsx" },
+        "aws.protocols#awsJson1_1": {},
+        "smithy.api#documentation": "Amazon FSx is a fully managed service that makes it easy for storage and\n application administrators to launch and use shared file storage.",

                                                                      ", + "smithy.api#title": "Amazon FSx" + }, "version": "2018-03-01", "operations": [ { @@ -56,6 +71,9 @@ { "target": "com.amazonaws.fsx#CreateBackup" }, + { + "target": "com.amazonaws.fsx#CreateDataRepositoryAssociation" + }, { "target": "com.amazonaws.fsx#CreateDataRepositoryTask" }, @@ -65,6 +83,9 @@ { "target": "com.amazonaws.fsx#CreateFileSystemFromBackup" }, + { + "target": "com.amazonaws.fsx#CreateSnapshot" + }, { "target": "com.amazonaws.fsx#CreateStorageVirtualMachine" }, @@ -77,9 +98,15 @@ { "target": "com.amazonaws.fsx#DeleteBackup" }, + { + "target": "com.amazonaws.fsx#DeleteDataRepositoryAssociation" + }, { "target": "com.amazonaws.fsx#DeleteFileSystem" }, + { + "target": "com.amazonaws.fsx#DeleteSnapshot" + }, { "target": "com.amazonaws.fsx#DeleteStorageVirtualMachine" }, @@ -89,6 +116,9 @@ { "target": "com.amazonaws.fsx#DescribeBackups" }, + { + "target": "com.amazonaws.fsx#DescribeDataRepositoryAssociations" + }, { "target": "com.amazonaws.fsx#DescribeDataRepositoryTasks" }, @@ -98,6 +128,9 @@ { "target": "com.amazonaws.fsx#DescribeFileSystems" }, + { + "target": "com.amazonaws.fsx#DescribeSnapshots" + }, { "target": "com.amazonaws.fsx#DescribeStorageVirtualMachines" }, @@ -110,37 +143,34 @@ { "target": "com.amazonaws.fsx#ListTagsForResource" }, + { + "target": "com.amazonaws.fsx#ReleaseFileSystemNfsV3Locks" + }, + { + "target": "com.amazonaws.fsx#RestoreVolumeFromSnapshot" + }, { "target": "com.amazonaws.fsx#TagResource" }, { "target": "com.amazonaws.fsx#UntagResource" }, + { + "target": "com.amazonaws.fsx#UpdateDataRepositoryAssociation" + }, { "target": "com.amazonaws.fsx#UpdateFileSystem" }, + { + "target": "com.amazonaws.fsx#UpdateSnapshot" + }, { "target": "com.amazonaws.fsx#UpdateStorageVirtualMachine" }, { "target": "com.amazonaws.fsx#UpdateVolume" } - ], - "traits": { - "aws.api#service": { - "sdkId": "FSx", - "arnNamespace": "fsx", - "cloudFormationName": "FSx", - "cloudTrailEventSource": "fsx.amazonaws.com", - "endpointPrefix": "fsx" - }, - "aws.auth#sigv4": { - "name": "fsx" - }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "

                                                                      Amazon FSx is a fully managed service that makes it easy for storage and\n application administrators to launch and use shared file storage.

                                                                      ", - "smithy.api#title": "Amazon FSx" - } + ] }, "com.amazonaws.fsx#ActiveDirectoryBackupAttributes": { "type": "structure", @@ -148,7 +178,7 @@ "DomainName": { "target": "com.amazonaws.fsx#ActiveDirectoryFullyQualifiedName", "traits": { - "smithy.api#documentation": "

                                                                      The fully qualified domain name of the self-managed AD directory.

                                                                      " + "smithy.api#documentation": "

                                                                      The fully qualified domain name of the self-managed Active Directory directory.

                                                                      " } }, "ActiveDirectoryId": { @@ -162,7 +192,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The Microsoft AD attributes of the Amazon FSx for Windows File Server file system.

                                                                      " + "smithy.api#documentation": "

                                                                      The Microsoft Active Directory attributes of the Amazon FSx for Windows File\n Server file system.

                                                                      " } }, "com.amazonaws.fsx#ActiveDirectoryError": { @@ -244,19 +274,19 @@ "ProgressPercent": { "target": "com.amazonaws.fsx#ProgressPercent", "traits": { - "smithy.api#documentation": "

                                                                      Provides the percent complete of a STORAGE_OPTIMIZATION administrative action. \n Does not apply to any other administrative action type.

                                                                      " + "smithy.api#documentation": "

                                                                      The percentage-complete status of a STORAGE_OPTIMIZATION administrative\n action. Does not apply to any other administrative action type.

                                                                      " } }, "RequestTime": { "target": "com.amazonaws.fsx#RequestTime", "traits": { - "smithy.api#documentation": "

                                                                      Time that the administrative action request was received.

                                                                      " + "smithy.api#documentation": "

                                                                      The time that the administrative action request was received.

                                                                      " } }, "Status": { "target": "com.amazonaws.fsx#Status", "traits": { - "smithy.api#documentation": "

                                                                      Describes the status of the administrative action, as follows:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n FAILED - Amazon FSx failed to process the administrative action successfully.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n IN_PROGRESS - Amazon FSx is processing the administrative action.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n PENDING - Amazon FSx is waiting to process the administrative action.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n COMPLETED - Amazon FSx has finished processing the administrative task.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n UPDATED_OPTIMIZING - For a storage capacity increase update, Amazon FSx has\n updated the file system with the new storage capacity, and is now performing the\n storage optimization process. For more information, see\n Managing\n storage capacity in the Amazon FSx for Windows File Server\n User Guide and Managing storage\n and throughput capacity in the Amazon FSx for Lustre User\n Guide.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      Describes the status of the administrative action, as follows:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n FAILED - Amazon FSx failed to process the administrative action\n successfully.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n IN_PROGRESS - Amazon FSx is processing the administrative action.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n PENDING - Amazon FSx is waiting to process the administrative\n action.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n COMPLETED - Amazon FSx has finished processing the administrative\n task.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n UPDATED_OPTIMIZING - For a storage-capacity increase update, Amazon FSx\n has updated the file system with the new storage capacity, and is now performing\n the storage-optimization process.

                                                                        \n
                                                                      • \n
                                                                      " } }, "TargetFileSystemValues": { @@ -270,10 +300,13 @@ }, "TargetVolumeValues": { "target": "com.amazonaws.fsx#Volume" + }, + "TargetSnapshotValues": { + "target": "com.amazonaws.fsx#Snapshot" } }, "traits": { - "smithy.api#documentation": "

                                                                      Describes a specific Amazon FSx administrative action for the current Windows or\n Lustre file system.

                                                                      " + "smithy.api#documentation": "

                                                                      Describes a specific Amazon FSx administrative action for the current Windows,\n Lustre, or OpenZFS file system.

                                                                      " } }, "com.amazonaws.fsx#AdministrativeActionFailureDetails": { @@ -293,7 +326,7 @@ "com.amazonaws.fsx#AdministrativeActionType": { "type": "string", "traits": { - "smithy.api#documentation": "

-      "smithy.api#documentation": "Describes the type of administrative action, as follows: FILE_SYSTEM_UPDATE - A file system update administrative action initiated by the user from the \n Amazon FSx console, API (UpdateFileSystem), or CLI (update-file-system). STORAGE_OPTIMIZATION - Once the FILE_SYSTEM_UPDATE \n task to increase a file system's storage capacity completes successfully, a \n STORAGE_OPTIMIZATION task starts. For Windows, storage optimization is the process of migrating the file system data\n to the new, larger disks. For Lustre, storage optimization consists of rebalancing the data across the existing and\n newly added file servers. You can track the storage optimization progress using the\n ProgressPercent property. When\n STORAGE_OPTIMIZATION completes successfully, the parent\n FILE_SYSTEM_UPDATE action status changes to\n COMPLETED. For more information, see Managing\n storage capacity in the Amazon FSx for Windows File Server\n User Guide and Managing storage\n and throughput capacity in the Amazon FSx for Lustre User\n Guide. FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new DNS alias with the file system. \n For more information, see\n . FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system.\n For more information, see .",
+      "smithy.api#documentation": "Describes the type of administrative action, as follows: FILE_SYSTEM_UPDATE - A file system update administrative action\n initiated from the Amazon FSx console, API\n (UpdateFileSystem), or CLI\n (update-file-system). STORAGE_OPTIMIZATION - After the FILE_SYSTEM_UPDATE\n task to increase a file system's storage capacity has been completed\n successfully, a STORAGE_OPTIMIZATION task starts. For Windows, storage optimization is the process of migrating the file system data\n to the new, larger disks. For Lustre, storage optimization consists of rebalancing the data across the existing and\n newly added file servers. For OpenZFS, storage optimization consists of migrating data from the\n older smaller disks to the newer larger disks. You can track the storage-optimization progress using the\n ProgressPercent property. When\n STORAGE_OPTIMIZATION has been completed successfully, the\n parent FILE_SYSTEM_UPDATE action status changes to\n COMPLETED. For more information, see Managing\n storage capacity in the Amazon FSx for Windows\n File Server User Guide, Managing storage\n and throughput capacity in the Amazon FSx for\n Lustre User Guide, and Managing storage capacity in the\n Amazon FSx for OpenZFS User Guide. FILE_SYSTEM_ALIAS_ASSOCIATION - A file system update to associate a new Domain\n Name System (DNS) alias with the file system. For more information, see \n AssociateFileSystemAliases. FILE_SYSTEM_ALIAS_DISASSOCIATION - A file system update to disassociate a DNS alias from the file system.\n For more information, see DisassociateFileSystemAliases. VOLUME_UPDATE - A volume update to an Amazon FSx for NetApp ONTAP or\n Amazon FSx for OpenZFS volume initiated from the Amazon FSx\n console, API (UpdateVolume), or CLI\n (update-volume). SNAPSHOT_UPDATE - A snapshot update to an Amazon FSx for\n OpenZFS volume initiated from the Amazon FSx console, API\n (UpdateSnapshot), or CLI (update-snapshot). RELEASE_NFS_V3_LOCKS - Tracks the release of Network File System\n (NFS) V3 locks on an Amazon FSx for OpenZFS file system.",
      "smithy.api#enum": [
        { "value": "FILE_SYSTEM_UPDATE", "name": "FILE_SYSTEM_UPDATE" },
        { "value": "STORAGE_OPTIMIZATION", "name": "STORAGE_OPTIMIZATION" },
        { "value": "FILE_SYSTEM_ALIAS_ASSOCIATION", "name": "FILE_SYSTEM_ALIAS_ASSOCIATION" },
@@ -310,6 +343,18 @@
        { "value": "FILE_SYSTEM_ALIAS_DISASSOCIATION", "name": "FILE_SYSTEM_ALIAS_DISASSOCIATION" },
+        { "value": "VOLUME_UPDATE", "name": "VOLUME_UPDATE" },
+        { "value": "SNAPSHOT_UPDATE", "name": "SNAPSHOT_UPDATE" },
+        { "value": "RELEASE_NFS_V3_LOCKS", "name": "RELEASE_NFS_V3_LOCKS" }
      ]
    }
  },

@@ -483,6 +528,34 @@
       "smithy.api#documentation": "The system generated response showing the DNS aliases that \n Amazon FSx is attempting to associate with the file system. \n Use the API \n operation to monitor the status of the aliases Amazon FSx is \n associating with the file system. It can take up to 2.5 minutes for \n the alias status to change from CREATING to AVAILABLE."
     }
   },
+  "com.amazonaws.fsx#AutoExportPolicy": {
+    "type": "structure",
+    "members": {
+      "Events": {
+        "target": "com.amazonaws.fsx#EventTypes",
+        "traits": {

+          "smithy.api#documentation": "The AutoExportPolicy can have the following event values: NEW - Amazon FSx automatically exports new files and\n directories to the data repository as they are added to the file system. CHANGED - Amazon FSx automatically exports changes to\n files and directories on the file system to the data repository. DELETED - Files and directories are automatically deleted\n on the data repository when they are deleted on the file system.\n You can define any combination of event types for your AutoExportPolicy."
+        }
+      }
+    },
+    "traits": {
+      "smithy.api#documentation": "Describes a data repository association's automatic export policy. The\n AutoExportPolicy defines the types of updated objects on the\n file system that will be automatically exported to the data repository.\n As you create, modify, or delete files, Amazon FSx automatically\n exports the defined changes asynchronously once your application finishes\n modifying the file.\n This AutoExportPolicy is supported only for file systems with the\n Persistent_2 deployment type."
+    }
+  },
+  "com.amazonaws.fsx#AutoImportPolicy": {
+    "type": "structure",
+    "members": {
+      "Events": {
+        "target": "com.amazonaws.fsx#EventTypes",
+        "traits": {
+          "smithy.api#documentation": "The AutoImportPolicy can have the following event values: NEW - Amazon FSx automatically imports metadata of\n files added to the linked S3 bucket that do not currently exist in the FSx\n file system. CHANGED - Amazon FSx automatically updates file\n metadata and invalidates existing file content on the file system as files\n change in the data repository. DELETED - Amazon FSx automatically deletes files\n on the file system as corresponding files are deleted in the data repository.\n You can define any combination of event types for your AutoImportPolicy."
+        }
+      }
+    },
+    "traits": {
+      "smithy.api#documentation": "Describes the data repository association's automatic import policy.\n The AutoImportPolicy defines how Amazon FSx keeps your file metadata and directory\n listings up to date by importing changes to your file system as you modify\n objects in a linked S3 bucket.\n This AutoImportPolicy is supported only for file systems\n with the Persistent_2 deployment type."
+    }
+  },
   "com.amazonaws.fsx#AutoImportPolicyType": {
     "type": "string",
     "traits": {
       "smithy.api#enum": [
         { "value": "NONE", "name": "NONE" },
         { "value": "NEW", "name": "NEW" },
@@ -498,6 +571,10 @@
         { "value": "NEW_CHANGED", "name": "NEW_CHANGED" },
+        { "value": "NEW_CHANGED_DELETED", "name": "NEW_CHANGED_DELETED" }
       ]
     }
   },

@@ -506,7 +583,7 @@
     "com.amazonaws.fsx#AutomaticBackupRetentionDays": {
       "type": "integer",
       "traits": {
         "smithy.api#box": {},
-        "smithy.api#documentation": "The number of days to retain automatic backups. Setting this to 0 disables\n automatic backups. You can retain automatic backups for a maximum of 90 days. The default is 0.",
+        "smithy.api#documentation": "The number of days to retain automatic backups. Setting this property to\n 0 disables automatic backups. You can retain automatic backups for a\n maximum of 90 days. The default is 0.",
         "smithy.api#range": {
           "min": 0,
           "max": 90
@@ -526,20 +603,20 @@
       "Lifecycle": {
         "target": "com.amazonaws.fsx#BackupLifecycle",
         "traits": {

-          "smithy.api#documentation": "The lifecycle status of the backup. AVAILABLE - The backup is fully available. PENDING - For user-initiated backups on Lustre file systems only; Amazon FSx has not started creating the backup. CREATING - Amazon FSx is creating the backup. TRANSFERRING - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to S3. COPYING - Amazon FSx is copying the backup. DELETED - Amazon FSx deleted the backup and it is no longer available. FAILED - Amazon FSx could not complete the backup.",
+          "smithy.api#documentation": "The lifecycle status of the backup. AVAILABLE - The backup is fully available. PENDING - For user-initiated backups on Lustre file systems only; Amazon FSx hasn't started creating the backup. CREATING - Amazon FSx is creating the backup. TRANSFERRING - For user-initiated backups on Lustre file systems only; Amazon FSx is transferring the backup to Amazon S3. COPYING - Amazon FSx is copying the backup. DELETED - Amazon FSx deleted the backup and it's no longer\n available. FAILED - Amazon FSx couldn't finish the backup.",
           "smithy.api#required": {}
         }
       },

       "FailureDetails": {
         "target": "com.amazonaws.fsx#BackupFailureDetails",
         "traits": {
-          "smithy.api#documentation": "Details explaining any failures that occur when creating a backup."
+          "smithy.api#documentation": "Details explaining any failures that occurred when creating a backup."
         }
       },
       "Type": {
         "target": "com.amazonaws.fsx#BackupType",
         "traits": {
-          "smithy.api#documentation": "The type of the file system backup.",
+          "smithy.api#documentation": "The type of the file-system backup.",
           "smithy.api#required": {}
         }
       },
@@ -556,7 +633,7 @@
       "KmsKeyId": {
         "target": "com.amazonaws.fsx#KmsKeyId",
         "traits": {
-          "smithy.api#documentation": "The ID of the Key Management Service (KMS) key used to encrypt the \n backup of the Amazon FSx file system's data at rest. \n"
+          "smithy.api#documentation": "The ID of the Key Management Service (KMS) key used to encrypt the\n backup of the Amazon FSx file system's data at rest."
         }
       },
       "ResourceARN": {
@@ -568,20 +645,20 @@
       "Tags": {
         "target": "com.amazonaws.fsx#Tags",
         "traits": {
-          "smithy.api#documentation": "Tags associated with a particular file system."
+          "smithy.api#documentation": "The tags associated with a particular file system."
         }
       },
       "FileSystem": {
         "target": "com.amazonaws.fsx#FileSystem",
         "traits": {
-          "smithy.api#documentation": "Metadata of the file system associated with the backup. This metadata is persisted\n even if the file system is deleted.",
+          "smithy.api#documentation": "The metadata of the file system associated with the backup. This metadata is persisted\n even if the file system is deleted.",
           "smithy.api#required": {}
         }
       },
       "DirectoryInformation": {
         "target": "com.amazonaws.fsx#ActiveDirectoryBackupAttributes",
         "traits": {
-          "smithy.api#documentation": "The configuration of the self-managed Microsoft Active Directory (AD) to which the Windows File Server instance is joined."
+          "smithy.api#documentation": "The configuration of the self-managed Microsoft Active Directory directory to which\n the Windows File Server instance is joined."
         }
       },
       "OwnerId": {
@@ -599,7 +676,7 @@
       "ResourceType": {
         "target": "com.amazonaws.fsx#ResourceType",
         "traits": {
-          "smithy.api#documentation": "Specifies the resource type that is backed up."
+          "smithy.api#documentation": "Specifies the resource type that's backed up."
         }
       },
       "Volume": {
         "target": "com.amazonaws.fsx#Volume"
       }
     },
     "traits": {
-      "smithy.api#documentation": "A backup of an Amazon FSx for Windows File Server or Amazon FSx for Lustre file system,\n or of an Amazon FSx for NetApp ONTAP volume."
+      "smithy.api#documentation": "A backup of an Amazon FSx for Windows File Server, Amazon FSx for\n Lustre file system, Amazon FSx for NetApp ONTAP volume, or Amazon FSx\n for OpenZFS file system."
     }
   },
   "com.amazonaws.fsx#BackupBeingCopied": {
@@ -631,7 +708,7 @@
       "Message": {
         "target": "com.amazonaws.fsx#ErrorMessage",
         "traits": {
-          "smithy.api#documentation": "A message describing the backup creation failure."
+          "smithy.api#documentation": "A message describing the backup-creation failure."
         }
       }
     },
@@ -642,7 +719,7 @@
   "com.amazonaws.fsx#BackupId": {
     "type": "string",
     "traits": {
-      "smithy.api#documentation": "The ID of the source backup. Specifies the backup you are copying.",
+      "smithy.api#documentation": "The ID of the source backup. Specifies the backup that you are copying.

                                                                      ", "smithy.api#length": { "min": 12, "max": 128 @@ -678,7 +755,7 @@ "com.amazonaws.fsx#BackupLifecycle": { "type": "string", "traits": { - "smithy.api#documentation": "

-      "smithy.api#documentation": "<p>The lifecycle status of the backup.</p> <ul> <li> <p> <code>AVAILABLE</code> - The backup is fully available.</p> </li> <li> <p> <code>PENDING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx has not started creating the backup.</p> </li> <li> <p> <code>CREATING</code> - Amazon FSx is creating the new user-intiated backup</p> </li> <li> <p> <code>TRANSFERRING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx is backing up the file system.</p> </li> <li> <p> <code>COPYING</code> - Amazon FSx is copying the backup.</p> </li> <li> <p> <code>DELETED</code> - Amazon FSx deleted the backup and it is no longer available.</p> </li> <li> <p> <code>FAILED</code> - Amazon FSx could not complete the backup.</p> </li> </ul>",
+      "smithy.api#documentation": "<p>The lifecycle status of the backup.</p> <ul> <li> <p> <code>AVAILABLE</code> - The backup is fully available.</p> </li> <li> <p> <code>PENDING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx hasn't started creating the backup.</p> </li> <li> <p> <code>CREATING</code> - Amazon FSx is creating the new user-initiated backup.</p> </li> <li> <p> <code>TRANSFERRING</code> - For user-initiated backups on Lustre file systems only; Amazon FSx is backing up the file\n system.</p> </li> <li> <p> <code>COPYING</code> - Amazon FSx is copying the backup.</p> </li> <li> <p> <code>DELETED</code> - Amazon FSx deleted the backup and it's no longer\n available.</p> </li> <li> <p> <code>FAILED</code> - Amazon FSx couldn't finish the backup.</p> </li> </ul>",
      "smithy.api#enum": [
        {
          "value": "AVAILABLE",
@@ -786,6 +863,12 @@
      "smithy.api#error": "client"
    }
  },
+  "com.amazonaws.fsx#BatchImportMetaDataOnCreate": {
+    "type": "boolean",
+    "traits": {
+      "smithy.api#box": {}
+    }
+  },

  "com.amazonaws.fsx#CancelDataRepositoryTask": {
    "type": "operation",
    "input": {
@@ -946,7 +1029,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "<p>Copies an existing backup within the same Amazon Web Services account to another Amazon Web Services Region\n (cross-Region copy) or within the same Amazon Web Services Region (in-Region copy). You can have up to five\n backup copy requests in progress to a single destination Region per account.</p> <p>You can use cross-Region backup copies for cross-region disaster recovery.\n You periodically take backups and copy them to another Region so that in the\n event of a disaster in the primary Region, you can restore from backup and recover\n availability quickly in the other Region. You can make cross-Region copies\n only within your Amazon Web Services partition.</p> <p>You can also use backup copies to clone your file data set to another Region\n or within the same Region.</p> <p>You can use the <code>SourceRegion</code> parameter to specify the Amazon Web Services Region\n from which the backup will be copied. For example, if you make the call from the\n <code>us-west-1</code> Region and want to copy a backup from the <code>us-east-2</code>\n Region, you specify <code>us-east-2</code> in the <code>SourceRegion</code> parameter\n to make a cross-Region copy. If you don't specify a Region, the backup copy is\n created in the same Region where the request is sent from (in-Region copy).</p> <p>For more information on creating backup copies, see \n Copying backups in the Amazon FSx for Windows User Guide and \n Copying backups \n in the Amazon FSx for Lustre User Guide.</p>",
+      "smithy.api#documentation": "<p>Copies an existing backup within the same Amazon Web Services account to another Amazon Web Services Region\n (cross-Region copy) or within the same Amazon Web Services Region (in-Region copy). You can have up to five\n backup copy requests in progress to a single destination Region per account.</p> <p>You can use cross-Region backup copies for cross-Region disaster recovery. You can\n periodically take backups and copy them to another Region so that in the event of a\n disaster in the primary Region, you can restore from backup and recover availability\n quickly in the other Region. You can make cross-Region copies only within your Amazon Web Services partition. A partition is a grouping of Regions. Amazon Web Services currently\n has three partitions: <code>aws</code> (Standard Regions), <code>aws-cn</code> (China\n Regions), and <code>aws-us-gov</code> (Amazon Web Services GovCloud [US] Regions).</p> <p>You can also use backup copies to clone your file dataset to another Region or within\n the same Region.</p> <p>You can use the <code>SourceRegion</code> parameter to specify the Amazon Web Services Region\n from which the backup will be copied. For example, if you make the call from the\n <code>us-west-1</code> Region and want to copy a backup from the <code>us-east-2</code>\n Region, you specify <code>us-east-2</code> in the <code>SourceRegion</code> parameter\n to make a cross-Region copy. If you don't specify a Region, the backup copy is\n created in the same Region where the request is sent from (in-Region copy).</p> <p>For more information about creating backup copies, see Copying backups\n in the Amazon FSx for Windows User Guide, Copying backups in the Amazon FSx for Lustre User\n Guide, and Copying backups in the Amazon FSx for OpenZFS User\n Guide.</p>",
      "smithy.api#idempotent": {}
    }
  },
@@ -962,14 +1045,14 @@
      "SourceBackupId": {
        "target": "com.amazonaws.fsx#SourceBackupId",
        "traits": {
-          "smithy.api#documentation": "<p>The ID of the source backup. Specifies the ID of the backup that is\n being copied.</p>",
+          "smithy.api#documentation": "<p>The ID of the source backup. Specifies the ID of the backup that's being copied.</p>",
          "smithy.api#required": {}
        }
      },
      "SourceRegion": {
        "target": "com.amazonaws.fsx#Region",
        "traits": {
-          "smithy.api#documentation": "<p>The source Amazon Web Services Region of the backup. Specifies the Amazon Web Services Region from which\n the backup is being copied. The source and destination Regions must be in\n the same Amazon Web Services partition. If you don't specify a Region, it defaults to\n the Region where the request is sent from (in-Region copy).</p>"
+          "smithy.api#documentation": "<p>The source Amazon Web Services Region of the backup. Specifies the Amazon Web Services Region from which the backup is being copied. The source and destination\n Regions must be in the same Amazon Web Services partition. If you don't specify a\n Region, <code>SourceRegion</code> defaults to the Region where the request is sent from\n (in-Region copy).</p>"
        }
      },
      "KmsKeyId": {
@@ -978,7 +1061,7 @@
      "CopyTags": {
        "target": "com.amazonaws.fsx#Flag",
        "traits": {
-          "smithy.api#documentation": "<p>A boolean flag indicating whether tags from the source backup\n should be copied to the backup copy. This value defaults to <code>false</code>.</p> <p>If you set <code>CopyTags</code> to <code>true</code> and the source backup has\n existing tags, you can use the <code>Tags</code> parameter to create new\n tags, provided that the sum of the source backup tags and the new tags\n doesn't exceed 50. Both sets of tags are merged. If there are tag\n conflicts (for example, two tags with the same key but different values),\n the tags created with the <code>Tags</code> parameter take precedence.</p>"
+          "smithy.api#documentation": "<p>A Boolean flag indicating whether tags from the source backup should be copied to the\n backup copy. This value defaults to <code>false</code>.</p> <p>If you set <code>CopyTags</code> to <code>true</code> and the source backup has existing\n tags, you can use the <code>Tags</code> parameter to create new tags, provided that the sum\n of the source backup tags and the new tags doesn't exceed 50. Both sets of tags are\n merged. If there are tag conflicts (for example, two tags with the same key but different\n values), the tags created with the <code>Tags</code> parameter take precedence.</p>"
        }
      },

      "Tags": {
@@ -1029,7 +1112,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "<p>Creates a backup of an existing Amazon FSx for Windows File Server\n or Amazon FSx for Lustre file system, or of an Amazon FSx for NetApp ONTAP\n volume. Creating regular backups is a best practice, enabling you to restore\n a file system or volume from a backup if an issue arises with the original\n file system or volume.</p> <p>For Amazon FSx for Lustre file systems, you can create a backup only \n for file systems with the following configuration:</p> <ul> <li> <p>a Persistent deployment type</p> </li> <li> <p>is not linked to a data repository.</p> </li> </ul> <p>For more information about backups, see the following:</p> <p>If a backup with the specified client request token exists, and the parameters\n match, this operation returns the description of the existing backup. If a backup\n specified client request token exists, and the parameters don't match, this\n operation returns <code>IncompatibleParameterError</code>. If a backup with the\n specified client request token doesn't exist, <code>CreateBackup</code> does the\n following:</p> <ul> <li> <p>Creates a new Amazon FSx backup with an assigned ID, and an initial\n lifecycle state of <code>CREATING</code>.</p> </li> <li> <p>Returns the description of the backup.</p> </li> </ul> <p>By using the idempotent operation, you can retry a <code>CreateBackup</code>\n operation without the risk of creating an extra backup. This approach can be useful when\n an initial call fails in a way that makes it unclear whether a backup was created. If\n you use the same client request token and the initial call created a backup, the\n operation returns a successful result because all the parameters are the same.</p> <p>The <code>CreateBackup</code> operation returns while the backup's\n lifecycle state is still <code>CREATING</code>. You can check the backup creation\n status by calling the <code>DescribeBackups</code> operation, which returns the\n backup state along with other information.</p>",
+      "smithy.api#documentation": "<p>Creates a backup of an existing Amazon FSx for Windows File Server file\n system, Amazon FSx for Lustre file system, Amazon FSx for NetApp ONTAP\n volume, or Amazon FSx for OpenZFS file system. We recommend creating regular\n backups so that you can restore a file system or volume from a backup if an issue arises\n with the original file system or volume.</p> <p>For Amazon FSx for Lustre file systems, you can create a backup only for file\n systems that have the following configuration:</p> <ul> <li> <p>A Persistent deployment type</p> </li> <li> <p>Are not linked to a data repository</p> </li> </ul> <p>For more information about backups, see the following:</p> <p>If a backup with the specified client request token exists and the parameters match,\n this operation returns the description of the existing backup. If a backup with the\n specified client request token exists and the parameters don't match, this operation\n returns <code>IncompatibleParameterError</code>. If a backup with the specified client\n request token doesn't exist, <code>CreateBackup</code> does the following:</p> <ul> <li> <p>Creates a new Amazon FSx backup with an assigned ID, and an initial\n lifecycle state of <code>CREATING</code>.</p> </li> <li> <p>Returns the description of the backup.</p> </li> </ul> <p>By using the idempotent operation, you can retry a <code>CreateBackup</code>\n operation without the risk of creating an extra backup. This approach can be useful when\n an initial call fails in a way that makes it unclear whether a backup was created. If\n you use the same client request token and the initial call created a backup, the\n operation returns a successful result because all the parameters are the same.</p> <p>The <code>CreateBackup</code> operation returns while the backup's lifecycle state is\n still <code>CREATING</code>. You can check the backup creation status by calling the\n <code>DescribeBackups</code> operation, which returns the backup state along with other\n information.</p>",
      "smithy.api#idempotent": {}
    }
  },
@@ -1045,20 +1128,20 @@
      "ClientRequestToken": {
        "target": "com.amazonaws.fsx#ClientRequestToken",
        "traits": {
-          "smithy.api#documentation": "<p>(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to ensure\n idempotent creation. This string is automatically filled on your behalf when you use the\n Command Line Interface (CLI) or an Amazon Web Services SDK.</p>",
+          "smithy.api#documentation": "<p>(Optional) A string of up to 64 ASCII characters that Amazon FSx uses to\n ensure idempotent creation. This string is automatically filled on your behalf when you\n use the Command Line Interface (CLI) or an Amazon Web Services SDK.</p>",
          "smithy.api#idempotencyToken": {}
        }
      },
      "Tags": {
        "target": "com.amazonaws.fsx#Tags",
        "traits": {
-          "smithy.api#documentation": "<p>(Optional) The tags to apply to the backup at backup creation. The key value of the\n <code>Name</code> tag appears in the console as the backup name. If you have set <code>CopyTagsToBackups</code> to true, and \n you specify one or more tags using the <code>CreateBackup</code> action, no existing file system tags are copied from the file system to the backup.</p>"
+          "smithy.api#documentation": "<p>(Optional) The tags to apply to the backup at backup creation. The key value of the\n <code>Name</code> tag appears in the console as the backup name. If you have set\n <code>CopyTagsToBackups</code> to <code>true</code>, and you specify one or more\n tags using the <code>CreateBackup</code> operation, no existing file system tags are\n copied from the file system to the backup.</p>"
        }
      },
      "VolumeId": {
        "target": "com.amazonaws.fsx#VolumeId",
        "traits": {
-          "smithy.api#documentation": "<p>The ID of he FSx for NetApp ONTAP volume to back up.</p>"
+          "smithy.api#documentation": "<p>(Optional) The ID of the FSx for ONTAP volume to back up.</p>"
        }
      }
    },

        "smithy.api#documentation": "<p>The response object for the <code>CreateBackup</code> operation.</p>"
      }
    },
+  "com.amazonaws.fsx#CreateDataRepositoryAssociation": {
+    "type": "operation",
+    "input": {
+      "target": "com.amazonaws.fsx#CreateDataRepositoryAssociationRequest"
+    },
+    "output": {
+      "target": "com.amazonaws.fsx#CreateDataRepositoryAssociationResponse"
+    },
+    "errors": [
+      { "target": "com.amazonaws.fsx#BadRequest" },
+      { "target": "com.amazonaws.fsx#FileSystemNotFound" },
+      { "target": "com.amazonaws.fsx#IncompatibleParameterError" },
+      { "target": "com.amazonaws.fsx#InternalServerError" },
+      { "target": "com.amazonaws.fsx#ServiceLimitExceeded" },
+      { "target": "com.amazonaws.fsx#UnsupportedOperation" }
+    ],
+    "traits": {
+      "smithy.api#documentation": "<p>Creates an Amazon FSx for Lustre data repository association (DRA). A data\n repository association is a link between a directory on the file system and\n an Amazon S3 bucket or prefix. You can have a maximum of 8 data repository\n associations on a file system. Data repository associations are supported only\n for file systems with the <code>Persistent_2</code> deployment type.</p> <p>Each data repository association must have a unique Amazon FSx file\n system directory and a unique S3 bucket or prefix associated with it. You\n can configure a data repository association for automatic import only,\n for automatic export only, or for both. To learn more about linking a\n data repository to your file system, see \n Linking your file system to an S3 bucket.</p>",
+      "smithy.api#idempotent": {}
+    }
+  },
+  "com.amazonaws.fsx#CreateDataRepositoryAssociationRequest": {
+    "type": "structure",
+    "members": {
+      "FileSystemId": {
+        "target": "com.amazonaws.fsx#FileSystemId",
+        "traits": {
+          "smithy.api#required": {}
+        }
+      },
+      "FileSystemPath": {
+        "target": "com.amazonaws.fsx#Namespace",
+        "traits": {
+          "smithy.api#documentation": "<p>A path on the file system that points to a high-level directory (such\n as <code>/ns1/</code>) or subdirectory (such as <code>/ns1/subdir/</code>)\n that will be mapped 1-1 with <code>DataRepositoryPath</code>.\n The leading forward slash in the name is required. Two data repository\n associations cannot have overlapping file system paths. For example, if\n a data repository is associated with file system path <code>/ns1/</code>,\n then you cannot link another data repository with file system\n path <code>/ns1/ns2</code>.</p> <p>This path specifies where in your file system files will be exported\n from or imported to. This file system directory can be linked to only one\n Amazon S3 bucket, and no other S3 bucket can be linked to the directory.</p>",
+          "smithy.api#required": {}
+        }
+      },
+      "DataRepositoryPath": {
+        "target": "com.amazonaws.fsx#ArchivePath",
+        "traits": {
+          "smithy.api#documentation": "<p>The path to the Amazon S3 data repository that will be linked to the file\n system. The path can be an S3 bucket or prefix in the format\n <code>s3://myBucket/myPrefix/</code>. This path specifies where in the S3\n data repository files will be imported from or exported to.</p>",
+          "smithy.api#required": {}
+        }
+      },
+      "BatchImportMetaDataOnCreate": {
+        "target": "com.amazonaws.fsx#BatchImportMetaDataOnCreate",
+        "traits": {
+          "smithy.api#documentation": "<p>Set to <code>true</code> to run an import data repository task to import\n metadata from the data repository to the file system after the data repository\n association is created. Default is <code>false</code>.</p>"
+        }
+      },
+      "ImportedFileChunkSize": {
+        "target": "com.amazonaws.fsx#Megabytes",
+        "traits": {
+          "smithy.api#documentation": "<p>For files imported from a data repository, this value determines the stripe count and\n maximum amount of data per file (in MiB) stored on a single physical disk. The maximum\n number of disks that a single file can be striped across is limited by the total number\n of disks that make up the file system.</p> <p>The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500\n GiB). Amazon S3 objects have a maximum size of 5 TB.</p>"
+        }
+      },
+      "S3": {
+        "target": "com.amazonaws.fsx#S3DataRepositoryConfiguration",
+        "traits": {
+          "smithy.api#documentation": "<p>The configuration for an Amazon S3 data repository linked to an\n Amazon FSx Lustre file system with a data repository association.\n The configuration defines which file events (new, changed, or\n deleted files or directories) are automatically imported from\n the linked data repository to the file system or automatically\n exported from the file system to the data repository.</p>"
+        }
+      },
+      "ClientRequestToken": {
+        "target": "com.amazonaws.fsx#ClientRequestToken",
+        "traits": {
+          "smithy.api#idempotencyToken": {}
+        }
+      },
+      "Tags": {
+        "target": "com.amazonaws.fsx#Tags"
+      }
+    }
+  },
+  "com.amazonaws.fsx#CreateDataRepositoryAssociationResponse": {
+    "type": "structure",
+    "members": {
+      "Association": {
+        "target": "com.amazonaws.fsx#DataRepositoryAssociation",
+        "traits": {
+          "smithy.api#documentation": "<p>The response object returned after the data repository association is created.</p>"
+        }
+      }
+    }
+  },

  "com.amazonaws.fsx#CreateDataRepositoryTask": {
    "type": "operation",
    "input": {
@@ -1112,7 +1291,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "<p>Creates an Amazon FSx for Lustre data repository task. You use data repository tasks\n to perform bulk operations between your Amazon FSx file system and its linked data\n repository. An example of a data repository task is\n exporting any data and metadata changes, including POSIX metadata, to files, directories, and symbolic links (symlinks) from your FSx file system to its linked data repository. A\n <code>CreateDataRepositoryTask</code> operation will fail if a data repository is not\n linked to the FSx file system. To learn more about data repository tasks, see \n Data Repository Tasks. \n To learn more about linking a data repository to your file system, see \n Linking your file system to an S3 bucket.</p>",
+      "smithy.api#documentation": "<p>Creates an Amazon FSx for Lustre data repository task. You use data repository tasks\n to perform bulk operations between your Amazon FSx file system and its linked data\n repositories. An example of a data repository task is exporting any data and metadata\n changes, including POSIX metadata, to files, directories, and symbolic links (symlinks)\n from your FSx file system to a linked data repository. A <code>CreateDataRepositoryTask</code>\n operation will fail if a data repository is not linked to the FSx file system. To learn\n more about data repository tasks, see \n Data Repository Tasks. \n To learn more about linking a data repository to your file system, see \n Linking your file system to an S3 bucket.</p>",
      "smithy.api#idempotent": {}
    }
  },

@@ -1208,7 +1387,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "<p>Creates a new, empty Amazon FSx file system.</p> <p>If a file system with the specified client request token exists and the parameters\n match, <code>CreateFileSystem</code> returns the description of the existing file\n system. If a file system specified client request token exists and the parameters\n don't match, this call returns <code>IncompatibleParameterError</code>. If a file\n system with the specified client request token doesn't exist,\n <code>CreateFileSystem</code> does the following:</p> <ul> <li> <p>Creates a new, empty Amazon FSx file system with an assigned ID, and an\n initial lifecycle state of <code>CREATING</code>.</p> </li> <li> <p>Returns the description of the file system.</p> </li> </ul> <p>This operation requires a client request token in the request that Amazon FSx uses\n to ensure idempotent creation. This means that calling the operation multiple times with\n the same client request token has no effect. By using the idempotent operation, you can\n retry a <code>CreateFileSystem</code> operation without the risk of creating an extra\n file system. This approach can be useful when an initial call fails in a way that makes\n it unclear whether a file system was created. Examples are if a transport level timeout\n occurred, or your connection was reset. If you use the same client request token and the\n initial call created a file system, the client receives success as long as the\n parameters are the same.</p> <p>The <code>CreateFileSystem</code> call returns while the file system's\n lifecycle state is still <code>CREATING</code>. You can check the file-system\n creation status by calling the DescribeFileSystems operation,\n which returns the file system state along with other information.</p>"
+      "smithy.api#documentation": "<p>Creates a new, empty Amazon FSx file system. You can create the following supported \n Amazon FSx file systems using the <code>CreateFileSystem</code> API operation:</p> <ul> <li> <p>Amazon FSx for Lustre</p> </li> <li> <p>Amazon FSx for NetApp ONTAP</p> </li> <li> <p>Amazon FSx for Windows File Server</p> </li> </ul> <p>This operation requires a client request token in the request that Amazon FSx uses\n to ensure idempotent creation. This means that calling the operation multiple times with\n the same client request token has no effect. By using the idempotent operation, you can\n retry a <code>CreateFileSystem</code> operation without the risk of creating an extra\n file system. This approach can be useful when an initial call fails in a way that makes\n it unclear whether a file system was created. Examples are if a transport level timeout\n occurred, or your connection was reset. If you use the same client request token and the\n initial call created a file system, the client receives success as long as the\n parameters are the same.</p> <p>If a file system with the specified client request token exists and the parameters\n match, <code>CreateFileSystem</code> returns the description of the existing file\n system. If a file system with the specified client request token exists and the\n parameters don't match, this call returns <code>IncompatibleParameterError</code>. If a\n file system with the specified client request token doesn't exist,\n <code>CreateFileSystem</code> does the following:</p> <ul> <li> <p>Creates a new, empty Amazon FSx file system with an assigned ID, and\n an initial lifecycle state of <code>CREATING</code>.</p> </li> <li> <p>Returns the description of the file system.</p> </li> </ul> <p>This operation requires a client request token in the request that Amazon FSx\n uses to ensure idempotent creation. This means that calling the operation multiple times\n with the same client request token has no effect. By using the idempotent operation, you\n can retry a <code>CreateFileSystem</code> operation without the risk of creating an\n extra file system. This approach can be useful when an initial call fails in a way that\n makes it unclear whether a file system was created. Examples are if a transport-level\n timeout occurred, or your connection was reset. If you use the same client request token\n and the initial call created a file system, the client receives a success message as\n long as the parameters are the same.</p> <p>The <code>CreateFileSystem</code> call returns while the file system's lifecycle\n state is still <code>CREATING</code>. You can check the file-system creation status\n by calling the DescribeFileSystems operation, which returns the file system state\n along with other information.</p>"
    }
  },

  "com.amazonaws.fsx#CreateFileSystemFromBackup": {
@@ -1249,7 +1428,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "<p>Creates a new Amazon FSx for Lustre or Amazon FSx for Windows File Server file system\n from an existing Amazon FSx backup.</p> <p>If a file system with the specified client request token exists and the parameters\n match, this operation returns the description of the file system. If a client\n request token specified by the file system exists and the parameters don't match,\n this call returns <code>IncompatibleParameterError</code>. If a file system with the\n specified client request token doesn't exist, this operation does the following:</p> <ul> <li> <p>Creates a new Amazon FSx file system from backup with an assigned ID, and\n an initial lifecycle state of <code>CREATING</code>.</p> </li> <li> <p>Returns the description of the file system.</p> </li> </ul> <p>Parameters like Active Directory, default share name, automatic backup, and backup\n settings default to the parameters of the file system that was backed up, unless\n overridden. You can explicitly supply other settings.</p> <p>By using the idempotent operation, you can retry a\n <code>CreateFileSystemFromBackup</code> call without the risk of creating an extra\n file system. This approach can be useful when an initial call fails in a way that makes\n it unclear whether a file system was created. Examples are if a transport level timeout\n occurred, or your connection was reset. If you use the same client request token and the\n initial call created a file system, the client receives success as long as the\n parameters are the same.</p> <p>The <code>CreateFileSystemFromBackup</code> call returns while the file\n system's lifecycle state is still <code>CREATING</code>. You can check the\n file-system creation status by calling the DescribeFileSystems\n operation, which returns the file system state along with other\n information.</p>"
+      "smithy.api#documentation": "<p>Creates a new Amazon FSx for Lustre, Amazon FSx for Windows File\n Server, or Amazon FSx for OpenZFS file system from an existing Amazon FSx backup.</p> <p>If a file system with the specified client request token exists and the parameters\n match, this operation returns the description of the file system. If a client request\n token with the specified by the file system exists and the parameters don't match, this\n call returns <code>IncompatibleParameterError</code>. If a file system with the\n specified client request token doesn't exist, this operation does the following:</p> <ul> <li> <p>Creates a new Amazon FSx file system from backup with an assigned ID,\n and an initial lifecycle state of <code>CREATING</code>.</p> </li> <li> <p>Returns the description of the file system.</p> </li> </ul> <p>Parameters like the Active Directory, default share name, automatic backup, and backup\n settings default to the parameters of the file system that was backed up, unless\n overridden. You can explicitly supply other settings.</p> <p>By using the idempotent operation, you can retry a\n <code>CreateFileSystemFromBackup</code> call without the risk of creating an extra\n file system. This approach can be useful when an initial call fails in a way that makes\n it unclear whether a file system was created. Examples are if a transport level timeout\n occurred, or your connection was reset. If you use the same client request token and the\n initial call created a file system, the client receives a success message as long as the\n parameters are the same.</p> <p>The <code>CreateFileSystemFromBackup</code> call returns while the file system's\n lifecycle state is still <code>CREATING</code>. You can check the file-system\n creation status by calling the \n DescribeFileSystems operation, which returns the file system state along\n with other information.</p>"
    }
  },

                                                                      A string of up to 64 ASCII characters that Amazon FSx uses to ensure\n idempotent creation. This string is automatically filled on your behalf when you use the\n Command Line Interface (CLI) or an Amazon Web Services SDK.

                                                                      ", + "smithy.api#documentation": "

                                                                      A string of up to 64 ASCII characters that Amazon FSx uses to ensure\n idempotent creation. This string is automatically filled on your behalf when you use the\n Command Line Interface (CLI) or an Amazon Web Services SDK.

                                                                      ", "smithy.api#idempotencyToken": {} } }, "SubnetIds": { "target": "com.amazonaws.fsx#SubnetIds", "traits": { - "smithy.api#documentation": "

                                                                      Specifies the IDs of the subnets that the file system will be accessible from. For Windows MULTI_AZ_1 \n file system deployment types, provide exactly two subnet IDs, one for the preferred file server \n and one for the standby file server. You specify one of these subnets as the preferred subnet \n using the WindowsConfiguration > PreferredSubnetID property.

                                                                      \n

                                                                      For Windows SINGLE_AZ_1 and SINGLE_AZ_2 deployment \n types and Lustre file systems, provide exactly one subnet ID.\n The file server is launched in that subnet's Availability Zone.

                                                                      ", + "smithy.api#documentation": "

                                                                      Specifies the IDs of the subnets that the file system will be accessible from. For Windows MULTI_AZ_1 \n file system deployment types, provide exactly two subnet IDs, one for the preferred file server \n and one for the standby file server. You specify one of these subnets as the preferred subnet \n using the WindowsConfiguration > PreferredSubnetID property.

                                                                      \n

                                                                      Windows SINGLE_AZ_1 and SINGLE_AZ_2 file system deployment\n types, Lustre file systems, and OpenZFS file systems provide exactly one subnet ID. The\n file server is launched in that subnet's Availability Zone.

                                                                      ", "smithy.api#required": {} } }, "SecurityGroupIds": { "target": "com.amazonaws.fsx#SecurityGroupIds", "traits": { - "smithy.api#documentation": "

                                                                      A list of IDs for the security groups that apply to the specified network\n interfaces created for file system access. These security groups apply to all network\n interfaces. This value isn't returned in later DescribeFileSystem requests.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of IDs for the security groups that apply to the specified network interfaces\n created for file system access. These security groups apply to all network interfaces.\n This value isn't returned in later DescribeFileSystem requests.

                                                                      " } }, "Tags": { @@ -1299,7 +1478,7 @@ "StorageType": { "target": "com.amazonaws.fsx#StorageType", "traits": { - "smithy.api#documentation": "

-    "smithy.api#documentation": "Sets the storage type for the Windows file system you're creating from a backup. Valid values are SSD and HDD.
        • Set to SSD to use solid state drive storage. Supported on all Windows deployment types.
        • Set to HDD to use hard disk drive storage. Supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types.
      Default value is SSD. HDD and SSD storage types have different minimum storage capacity requirements. A restored file system's storage capacity is tied to the file system that was backed up. You can create a file system that uses HDD storage from a backup of a file system that used SSD storage only if the original SSD file system had a storage capacity of at least 2000 GiB."
+    "smithy.api#documentation": "Sets the storage type for the Windows or OpenZFS file system that you're creating from a backup. Valid values are SSD and HDD.
        • Set to SSD to use solid state drive storage. SSD is supported on all Windows and OpenZFS deployment types.
        • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 FSx for Windows File Server file system deployment types.
      The default value is SSD. HDD and SSD storage types have different minimum storage capacity requirements. A restored file system's storage capacity is tied to the file system that was backed up. You can create a file system that uses HDD storage from a backup of a file system that used SSD storage if the original SSD file system had a storage capacity of at least 2000 GiB."
} }, "KmsKeyId": {
@@ -1308,7 +1487,13 @@
"FileSystemTypeVersion": { "target": "com.amazonaws.fsx#FileSystemTypeVersion", "traits": {
-    "smithy.api#documentation": "Sets the version for the Amazon FSx for Lustre file system you're creating from a backup. Valid values are 2.10 and 2.12. You don't need to specify FileSystemTypeVersion because it will be applied using the backup's FileSystemTypeVersion setting. If you choose to specify FileSystemTypeVersion when creating from backup, the value must match the backup's FileSystemTypeVersion setting."
+    "smithy.api#documentation": "Sets the version for the Amazon FSx for Lustre file system that you're creating from a backup. Valid values are 2.10 and 2.12. You don't need to specify FileSystemTypeVersion because it will be applied using the backup's FileSystemTypeVersion setting. If you choose to specify FileSystemTypeVersion when creating from backup, the value must match the backup's FileSystemTypeVersion setting."
+    }
+  },
+  "OpenZFSConfiguration": {
+    "target": "com.amazonaws.fsx#CreateFileSystemOpenZFSConfiguration",
+    "traits": {
+      "smithy.api#documentation": "The OpenZFS configuration for the file system that's being created."
     } } },

@@ -1342,37 +1527,37 @@
"ImportPath": { "target": "com.amazonaws.fsx#ArchivePath", "traits": {
-    "smithy.api#documentation": "(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system."
+    "smithy.api#documentation": "(Optional) The path to the Amazon S3 bucket (including the optional prefix) that you're using as the data repository for your Amazon FSx for Lustre file system. The root of your FSx for Lustre file system will be mapped to the root of the Amazon S3 bucket you select. An example is s3://import-bucket/optional-prefix. If you specify a prefix after the Amazon S3 bucket name, only object keys with that prefix are loaded into the file system. This parameter is not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository."
} }, "ExportPath": { "target": "com.amazonaws.fsx#ArchivePath", "traits": {
-    "smithy.api#documentation": "(Optional) The path in Amazon S3 where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z. The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you only specify a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket."
+    "smithy.api#documentation": "(Optional) Available with Scratch and Persistent_1 deployment types. Specifies the path in the Amazon S3 bucket where the root of your Amazon FSx file system is exported. The path must use the same Amazon S3 bucket as specified in ImportPath. You can provide an optional prefix to which new and changed data is to be exported from your Amazon FSx for Lustre file system. If an ExportPath value is not provided, Amazon FSx sets a default export path, s3://import-bucket/FSxLustre[creation-timestamp]. The timestamp is in UTC format, for example s3://import-bucket/FSxLustre20181105T222312Z. The Amazon S3 export bucket must be the same as the import bucket specified by ImportPath. If you specify only a bucket name, such as s3://import-bucket, you get a 1:1 mapping of file system objects to S3 bucket objects. This mapping means that the input data in S3 is overwritten on export. If you provide a custom prefix in the export path, such as s3://import-bucket/[custom-optional-prefix], Amazon FSx exports the contents of your file system to that export prefix in the Amazon S3 bucket. This parameter is not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository."
} }, "ImportedFileChunkSize": { "target": "com.amazonaws.fsx#Megabytes", "traits": {
-    "smithy.api#documentation": "(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB."
+    "smithy.api#documentation": "(Optional) For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB. This parameter is not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository."
} }, "DeploymentType": { "target": "com.amazonaws.fsx#LustreDeploymentType", "traits": {
-    "smithy.api#documentation": "Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1. Choose PERSISTENT_1 deployment type for longer-term storage and workloads and encryption of data in transit. To learn more about deployment types, see FSx for Lustre Deployment Options. Encryption of data in-transit is automatically enabled when you access a SCRATCH_2 or PERSISTENT_1 file system from Amazon EC2 instances that support this feature. (Default = SCRATCH_1) Encryption of data in-transit for SCRATCH_2 and PERSISTENT_1 deployment types is supported when accessed from supported instance types in supported Amazon Web Services Regions. To learn more, see Encrypting Data in Transit."
+    "smithy.api#documentation": "(Optional) Choose SCRATCH_1 and SCRATCH_2 deployment types when you need temporary storage and shorter-term processing of data. The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst throughput capacity than SCRATCH_1. Choose PERSISTENT_1 for longer-term storage and for throughput-focused workloads that aren’t latency-sensitive. PERSISTENT_1 supports encryption of data in transit, and is available in all Amazon Web Services Regions in which FSx for Lustre is available. Choose PERSISTENT_2 for longer-term storage and for latency-sensitive workloads that require the highest levels of IOPS/throughput. PERSISTENT_2 supports SSD storage, and offers higher PerUnitStorageThroughput (up to 1000 MB/s/TiB). PERSISTENT_2 is available in a limited number of Amazon Web Services Regions. For more information, and an up-to-date list of Amazon Web Services Regions in which PERSISTENT_2 is available, see File system deployment options for FSx for Lustre in the Amazon FSx for Lustre User Guide. If you choose PERSISTENT_2, and you set FileSystemTypeVersion to 2.10, the CreateFileSystem operation fails. Encryption of data in transit is automatically turned on when you access SCRATCH_2, PERSISTENT_1 and PERSISTENT_2 file systems from Amazon EC2 instances that support automatic encryption in the Amazon Web Services Regions where they are available. For more information about encryption in transit for FSx for Lustre file systems, see Encrypting data in transit in the Amazon FSx for Lustre User Guide. (Default = SCRATCH_1)"
} }, "AutoImportPolicy": { "target": "com.amazonaws.fsx#AutoImportPolicyType", "traits": {

-    "smithy.api#documentation": "(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:
        • NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.
        • NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.
        • NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.
      For more information, see Automatically import updates from your S3 bucket."
+    "smithy.api#documentation": "(Optional) Available with Scratch and Persistent_1 deployment types. When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values:
        • NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option.
        • NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system.
        • NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option.
        • NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket.
      For more information, see Automatically import updates from your S3 bucket. This parameter is not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository."
} }, "PerUnitStorageThroughput": { "target": "com.amazonaws.fsx#PerUnitStorageThroughput", "traits": {

-    "smithy.api#documentation": "Required for the PERSISTENT_1 deployment type, describes the amount of read and write throughput for each 1 tebibyte of storage, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4 TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision. Valid values for SSD storage: 50, 100, 200. Valid values for HDD storage: 12, 40."
+    "smithy.api#documentation": "Required with PERSISTENT_1 and PERSISTENT_2 deployment types, provisions the amount of read and write throughput for each 1 tebibyte (TiB) of file system storage capacity, in MB/s/TiB. File system throughput capacity is calculated by multiplying file system storage capacity (TiB) by the PerUnitStorageThroughput (MB/s/TiB). For a 2.4-TiB file system, provisioning 50 MB/s/TiB of PerUnitStorageThroughput yields 120 MB/s of file system throughput. You pay for the amount of throughput that you provision. Valid values:
        • For PERSISTENT_1 SSD storage: 50, 100, 200 MB/s/TiB.
        • For PERSISTENT_1 HDD storage: 12, 40 MB/s/TiB.
        • For PERSISTENT_2 SSD storage: 125, 250, 500, 1000 MB/s/TiB."
} }, "DailyAutomaticBackupStartTime": {

@@ -1384,24 +1569,30 @@
"CopyTagsToBackups": { "target": "com.amazonaws.fsx#Flag", "traits": {
-    "smithy.api#documentation": "(Optional) Not available to use with file systems that are linked to a data repository. A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. If it's set to true, all file system tags are copied to all automatic and user-initiated backups when the user doesn't specify any backup-specific tags. If this value is true, and you specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. For more information, see Working with backups."
+    "smithy.api#documentation": "(Optional) Not available for use with file systems that are linked to a data repository. A boolean flag indicating whether tags for the file system should be copied to backups. The default value is false. If CopyTagsToBackups is set to true, all file system tags are copied to all automatic and user-initiated backups when the user doesn't specify any backup-specific tags. If CopyTagsToBackups is set to true and you specify one or more backup tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value. (Default = false) For more information, see Working with backups in the Amazon FSx for Lustre User Guide."
} }, "DriveCacheType": { "target": "com.amazonaws.fsx#DriveCacheType", "traits": {
-    "smithy.api#documentation": "The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices. This parameter is required when storage type is HDD. Set to READ, improve the performance for frequently accessed files and allows 20% of the total storage capacity of the file system to be cached. This parameter is required when StorageType is set to HDD."
+    "smithy.api#documentation": "The type of drive cache used by PERSISTENT_1 file systems that are provisioned with HDD storage devices. This parameter is required when storage type is HDD. Set this property to READ to improve the performance for frequently accessed files by caching up to 20% of the total storage capacity of the file system. This parameter is required when StorageType is set to HDD."
} }, "DataCompressionType": { "target": "com.amazonaws.fsx#DataCompressionType", "traits": {
-    "smithy.api#documentation": "Sets the data compression configuration for the file system. DataCompressionType can have the following values:
        • NONE - (Default) Data compression is turned off when the file system is created.
        • LZ4 - Data compression is turned on with the LZ4 algorithm.
      For more information, see Lustre data compression."
+    "smithy.api#documentation": "Sets the data compression configuration for the file system. DataCompressionType can have the following values:
        • NONE - (Default) Data compression is turned off when the file system is created.
        • LZ4 - Data compression is turned on with the LZ4 algorithm.
      For more information, see Lustre data compression in the Amazon FSx for Lustre User Guide."
+    }
+  },
+  "LogConfiguration": {
+    "target": "com.amazonaws.fsx#LustreLogCreateConfiguration",
+    "traits": {
+      "smithy.api#documentation": "The Lustre logging configuration used when creating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs."
    } } },
"traits": {

-    "smithy.api#documentation": "The Lustre configuration for the file system being created."
+    "smithy.api#documentation": "The Lustre configuration for the file system being created. The following parameters are not supported for file systems with the Persistent_2 deployment type. Instead, use CreateDataRepositoryAssociation to create a data repository association to link your Lustre file system to a data repository.
        • AutoImportPolicy
        • ExportPath
        • ImportedFileChunkSize
        • ImportPath"
} },

"com.amazonaws.fsx#CreateFileSystemOntapConfiguration": {
@@ -1416,7 +1607,7 @@
"DeploymentType": { "target": "com.amazonaws.fsx#OntapDeploymentType", "traits": {
-    "smithy.api#documentation": "Specifies the ONTAP file system deployment type to use in creating the file system.",
+    "smithy.api#documentation": "Specifies the FSx for ONTAP file system deployment type to use in creating the file system. MULTI_AZ_1 is the supported ONTAP deployment type.",
     "smithy.api#required": {} } },
@@ -1429,36 +1620,92 @@
"FsxAdminPassword": { "target": "com.amazonaws.fsx#AdminPassword", "traits": {
-    "smithy.api#documentation": "The ONTAP administrative password for the fsxadmin user that you can use to administer your file system using the ONTAP CLI and REST API."
+    "smithy.api#documentation": "The ONTAP administrative password for the fsxadmin user with which you administer your file system using the NetApp ONTAP CLI and REST API."
} }, "DiskIopsConfiguration": { "target": "com.amazonaws.fsx#DiskIopsConfiguration", "traits": {
-    "smithy.api#documentation": "The SSD IOPS configuration for the Amazon FSx for NetApp ONTAP file system."
+    "smithy.api#documentation": "The SSD IOPS configuration for the FSx for ONTAP file system."
} }, "PreferredSubnetId": {
-    "target": "com.amazonaws.fsx#SubnetId"
+    "target": "com.amazonaws.fsx#SubnetId",
+    "traits": {
+      "smithy.api#documentation": "Required when DeploymentType is set to MULTI_AZ_1. This specifies the subnet in which you want the preferred file server to be located."
+    }
}, "RouteTableIds": { "target": "com.amazonaws.fsx#RouteTableIds", "traits": {
-    "smithy.api#documentation": "Specifies the VPC route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table."
+    "smithy.api#documentation": "Specifies the virtual private cloud (VPC) route tables in which your file system's endpoints will be created. You should specify all VPC route tables associated with the subnets in which your clients are located. By default, Amazon FSx selects your VPC's default route table."
+    }
+  },
+  "ThroughputCapacity": {
+    "target": "com.amazonaws.fsx#MegabytesPerSecond",
+    "traits": {
+      "smithy.api#documentation": "Sets the throughput capacity for the file system that you're creating. Valid values are 512, 1024, and 2048 MBps.",
+      "smithy.api#required": {}
+    }
+  },
+  "WeeklyMaintenanceStartTime": {
+    "target": "com.amazonaws.fsx#WeeklyTime"
+  }
+ },
+ "traits": {
+   "smithy.api#documentation": "The ONTAP configuration properties of the FSx for ONTAP file system that you are creating."
+ }
+},

+ "com.amazonaws.fsx#CreateFileSystemOpenZFSConfiguration": {
+   "type": "structure",
+   "members": {
+     "AutomaticBackupRetentionDays": { "target": "com.amazonaws.fsx#AutomaticBackupRetentionDays" },
+     "CopyTagsToBackups": { "target": "com.amazonaws.fsx#Flag", "traits": {
+       "smithy.api#documentation": "A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value."
+     } },
+     "CopyTagsToVolumes": { "target": "com.amazonaws.fsx#Flag", "traits": {
+       "smithy.api#documentation": "A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value."
+     } },
+     "DailyAutomaticBackupStartTime": { "target": "com.amazonaws.fsx#DailyTime" },
+     "DeploymentType": { "target": "com.amazonaws.fsx#OpenZFSDeploymentType", "traits": {
+       "smithy.api#documentation": "Specifies the file system deployment type. Amazon FSx for OpenZFS supports SINGLE_AZ_1. SINGLE_AZ_1 is a file system configured for a single Availability Zone (AZ) of redundancy.",
+       "smithy.api#required": {}
+     } },
      "ThroughputCapacity": { "target": "com.amazonaws.fsx#MegabytesPerSecond", "traits": {
+       "smithy.api#documentation": "Specifies the throughput of an Amazon FSx for OpenZFS file system, measured in megabytes per second (MB/s). Valid values are 64, 128, 256, 512, 1024, 2048, 3072, or 4096 MB/s. You pay for additional throughput capacity that you provision.",
        "smithy.api#required": {}
      } },
      "WeeklyMaintenanceStartTime": { "target": "com.amazonaws.fsx#WeeklyTime" },
+     "DiskIopsConfiguration": { "target": "com.amazonaws.fsx#DiskIopsConfiguration" },
+     "RootVolumeConfiguration": { "target": "com.amazonaws.fsx#OpenZFSCreateRootVolumeConfiguration", "traits": {
+       "smithy.api#documentation": "The configuration Amazon FSx uses when creating the root volume of the Amazon FSx for OpenZFS file system. All volumes are children of the root volume."
+     } }
    },
    "traits": {
-     "smithy.api#documentation": "The ONTAP configuration properties of the FSx for NetApp ONTAP file system that you are creating."
+     "smithy.api#documentation": "The OpenZFS configuration properties for the file system that you are creating."
    } },

"com.amazonaws.fsx#CreateFileSystemRequest": {
@@ -1467,34 +1714,34 @@
"ClientRequestToken": { "target": "com.amazonaws.fsx#ClientRequestToken", "traits": {
-    "smithy.api#documentation": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.",
+    "smithy.api#documentation": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent creation. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.",
     "smithy.api#idempotencyToken": {} } },
"FileSystemType": { "target": "com.amazonaws.fsx#FileSystemType", "traits": {
-    "smithy.api#documentation": "The type of Amazon FSx file system to create. Valid values are WINDOWS, LUSTRE, and ONTAP.",
+    "smithy.api#documentation": "The type of Amazon FSx file system to create. Valid values are WINDOWS, LUSTRE, ONTAP, and OPENZFS.",
     "smithy.api#required": {} } },
"StorageCapacity": { "target": "com.amazonaws.fsx#StorageCapacity", "traits": {
-    "smithy.api#documentation": "Sets the storage capacity of the file system that you're creating.
      For Lustre file systems:
        • For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.
        • For PERSISTENT HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.
        • For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.
      For Windows file systems:
        • If StorageType=SSD, valid values are 32 GiB - 65,536 GiB (64 TiB).
        • If StorageType=HDD, valid values are 2000 GiB - 65,536 GiB (64 TiB).
      For ONTAP file systems:
        • Valid values are 1024 GiB - 196,608 GiB (192 TiB).",
+    "smithy.api#documentation": "Sets the storage capacity of the file system that you're creating, in gibibytes (GiB).
      FSx for Lustre file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType and the Lustre DeploymentType, as follows:
        • For SCRATCH_2, PERSISTENT_2 and PERSISTENT_1 deployment types using SSD storage type, the valid values are 1200 GiB, 2400 GiB, and increments of 2400 GiB.
        • For PERSISTENT_1 HDD file systems, valid values are increments of 6000 GiB for 12 MB/s/TiB file systems and increments of 1800 GiB for 40 MB/s/TiB file systems.
        • For SCRATCH_1 deployment type, valid values are 1200 GiB, 2400 GiB, and increments of 3600 GiB.
      FSx for ONTAP file systems - The amount of storage capacity that you can configure is from 1024 GiB up to 196,608 GiB (192 TiB).
      FSx for OpenZFS file systems - The amount of storage capacity that you can configure is from 64 GiB up to 524,288 GiB (512 TiB).
      FSx for Windows File Server file systems - The amount of storage capacity that you can configure depends on the value that you set for StorageType as follows:
        • For SSD storage, valid values are 32 GiB-65,536 GiB (64 TiB).
        • For HDD storage, valid values are 2000 GiB-65,536 GiB (64 TiB).",
     "smithy.api#required": {} } },
"StorageType": { "target": "com.amazonaws.fsx#StorageType", "traits": {
-    "smithy.api#documentation": "Sets the storage type for the file system you're creating. Valid values are SSD and HDD.
        • Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, and ONTAP deployment types.
        • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT Lustre file system deployment types.
      Default value is SSD. For more information, see Storage Type Options in the Amazon FSx for Windows User Guide and Multiple Storage Options in the Amazon FSx for Lustre User Guide."
+    "smithy.api#documentation": "Sets the storage type for the file system that you're creating. Valid values are SSD and HDD.
        • Set to SSD to use solid state drive storage. SSD is supported on all Windows, Lustre, ONTAP, and OpenZFS deployment types.
        • Set to HDD to use hard disk drive storage. HDD is supported on SINGLE_AZ_2 and MULTI_AZ_1 Windows file system deployment types, and on PERSISTENT Lustre file system deployment types.
      Default value is SSD. For more information, see Storage type options in the FSx for Windows File Server User Guide and Multiple storage options in the FSx for Lustre User Guide."
} },

-    "smithy.api#documentation": "Specifies the IDs of the subnets that the file system will be accessible from. For Windows and ONTAP MULTI_AZ_1 file system deployment types, provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these subnets as the preferred subnet using the WindowsConfiguration > PreferredSubnetID or OntapConfiguration > PreferredSubnetID properties. For more information, see Availability and durability: Single-AZ and Multi-AZ file systems in the Amazon FSx for Windows User Guide and Availability and durability in the Amazon FSx for ONTAP User Guide. For Windows SINGLE_AZ_1 and SINGLE_AZ_2 file system deployment types and Lustre file systems, provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone.",
+    "smithy.api#documentation": "Specifies the IDs of the subnets that the file system will be accessible from. For Windows and ONTAP MULTI_AZ_1 deployment types, provide exactly two subnet IDs, one for the preferred file server and one for the standby file server. You specify one of these subnets as the preferred subnet using the WindowsConfiguration > PreferredSubnetID or OntapConfiguration > PreferredSubnetID properties. For more information about Multi-AZ file system configuration, see Availability and durability: Single-AZ and Multi-AZ file systems in the Amazon FSx for Windows User Guide and Availability and durability in the Amazon FSx for ONTAP User Guide. For Windows SINGLE_AZ_1 and SINGLE_AZ_2 and all Lustre deployment types, provide exactly one subnet ID. The file server is launched in that subnet's Availability Zone.",
     "smithy.api#required": {} } },
@@ -1507,7 +1754,7 @@
"Tags": { "target": "com.amazonaws.fsx#Tags", "traits": {
-    "smithy.api#documentation": "The tags to apply to the file system being created. The key value of the Name tag appears in the console as the file system name."
+    "smithy.api#documentation": "The tags to apply to the file system that's being created. The key value of the Name tag appears in the console as the file system name."
} }, "KmsKeyId": {
@@ -1516,7 +1763,7 @@
"WindowsConfiguration": { "target": "com.amazonaws.fsx#CreateFileSystemWindowsConfiguration", "traits": {
-    "smithy.api#documentation": "The Microsoft Windows configuration for the file system being created."
+    "smithy.api#documentation": "The Microsoft Windows configuration for the file system that's being created."
} }, "LustreConfiguration": {
@@ -1528,7 +1775,13 @@
"FileSystemTypeVersion": { "target": "com.amazonaws.fsx#FileSystemTypeVersion", "traits": {

                                                                      Sets the version of the Amazon FSx for Lustre file system you're creating.\n Valid values are 2.10 and 2.12.

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Set the value to 2.10 to create a Lustre 2.10\n file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Set the value to 2.12 to create a Lustre 2.12\n file system.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      Default value is 2.10.

                                                                      " + "smithy.api#documentation": "

                                                                      (Optional) For FSx for Lustre file systems, sets the Lustre version for the\n file system that you're creating. Valid values are 2.10 and\n 2.12:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        2.10 is supported by the Scratch and Persistent_1 Lustre deployment types.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        2.12 is supported by all Lustre deployment types. 2.12 is \n required when setting FSx for Lustre DeploymentType to \n PERSISTENT_2.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      Default value = 2.10, except when DeploymentType is set to \n PERSISTENT_2, then the default is 2.12.

                                                                      \n \n

                                                                      If you set FileSystemTypeVersion to 2.10 for a \n PERSISTENT_2 Lustre deployment type, the CreateFileSystem \n operation fails.

                                                                      \n
                                                                      " + } + }, + "OpenZFSConfiguration": { + "target": "com.amazonaws.fsx#CreateFileSystemOpenZFSConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      The OpenZFS configuration for the file system that's being created.

                                                                      " } } }, @@ -1577,7 +1830,7 @@ "ThroughputCapacity": { "target": "com.amazonaws.fsx#MegabytesPerSecond", "traits": { - "smithy.api#documentation": "

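The StorageType, FileSystemTypeVersion, and new OpenZFSConfiguration members above surface as plain request fields in the regenerated TypeScript client. Below is a minimal, illustrative sketch only: the Lustre DeploymentType and PerUnitStorageThroughput values come from the wider FSx model rather than this hunk, and the subnet ID is a placeholder.

```ts
import { FSxClient, CreateFileSystemCommand } from "@aws-sdk/client-fsx";

const client = new FSxClient({ region: "us-east-1" });

// Sketch: creates a Lustre PERSISTENT_2 file system, which is why
// FileSystemTypeVersion must be "2.12" per the documentation above.
async function createLustreFileSystem() {
  const { FileSystem } = await client.send(
    new CreateFileSystemCommand({
      FileSystemType: "LUSTRE",
      FileSystemTypeVersion: "2.12",
      StorageType: "SSD",
      StorageCapacity: 1200,
      SubnetIds: ["subnet-0123456789abcdef0"], // placeholder
      LustreConfiguration: {
        DeploymentType: "PERSISTENT_2",
        PerUnitStorageThroughput: 125,
      },
    })
  );
  return FileSystem?.FileSystemId;
}
```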
@@ -1577,7 +1830,7 @@
      "ThroughputCapacity": {
        "target": "com.amazonaws.fsx#MegabytesPerSecond",
        "traits": {
-          "smithy.api#documentation": "The throughput of an Amazon FSx file system, measured in megabytes per second, in 2 to the nth increments, between 2^3 (8) and 2^11 (2048).",
+          "smithy.api#documentation": "Sets the throughput capacity of an Amazon FSx file system, measured in megabytes per second (MB/s), in 2 to the nth increments, between 2^3 (8) and 2^11 (2048).",
          "smithy.api#required": {}
        }
      },
@@ -1667,46 +1920,196 @@
        "smithy.api#documentation": "Specifies the configuration of the ONTAP volume that you are creating."
      }
    },
-    "com.amazonaws.fsx#CreateStorageVirtualMachine": {
-      "type": "operation",
-      "input": {
-        "target": "com.amazonaws.fsx#CreateStorageVirtualMachineRequest"
-      },
-      "output": {
-        "target": "com.amazonaws.fsx#CreateStorageVirtualMachineResponse"
-      },
-      "errors": [
-        { "target": "com.amazonaws.fsx#ActiveDirectoryError" },
-        { "target": "com.amazonaws.fsx#BadRequest" },
-        { "target": "com.amazonaws.fsx#FileSystemNotFound" },
-        { "target": "com.amazonaws.fsx#IncompatibleParameterError" },
-        { "target": "com.amazonaws.fsx#InternalServerError" },
-        { "target": "com.amazonaws.fsx#ServiceLimitExceeded" },
-        { "target": "com.amazonaws.fsx#UnsupportedOperation" }
-      ],
-      "traits": {
-        "smithy.api#documentation": "Creates a storage virtual machine (SVM) for an Amazon FSx for ONTAP file system."
-      }
-    },
+    "com.amazonaws.fsx#CreateOpenZFSOriginSnapshotConfiguration": {
+      "type": "structure",
+      "members": {
+        "SnapshotARN": {
+          "target": "com.amazonaws.fsx#ResourceARN",
+          "traits": {
+            "smithy.api#required": {}
+          }
+        },
+        "CopyStrategy": {
+          "target": "com.amazonaws.fsx#OpenZFSCopyStrategy",
+          "traits": {
+            "smithy.api#documentation": "The strategy used when copying data from the snapshot to the new volume. CLONE - The new volume references the data in the origin snapshot. Cloning a snapshot is faster than copying data from the snapshot to a new volume and doesn't consume disk throughput. However, the origin snapshot can't be deleted if there is a volume using its copied data. FULL_COPY - Copies all data from the snapshot to the new volume.",
+            "smithy.api#required": {}
+          }
+        }
+      },
+      "traits": {
+        "smithy.api#documentation": "The snapshot configuration to use when creating an OpenZFS volume from a snapshot."
+      }
+    },
+    "com.amazonaws.fsx#CreateOpenZFSVolumeConfiguration": {
+      "type": "structure",
+      "members": {
+        "ParentVolumeId": {
+          "target": "com.amazonaws.fsx#VolumeId",
+          "traits": {
+            "smithy.api#documentation": "The ID of the volume to use as the parent volume.",
+            "smithy.api#required": {}
+          }
+        },
+        "StorageCapacityReservationGiB": {
+          "target": "com.amazonaws.fsx#IntegerNoMax",
+          "traits": {
+            "smithy.api#documentation": "The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved."
+          }
+        },
+        "StorageCapacityQuotaGiB": {
+          "target": "com.amazonaws.fsx#IntegerNoMax",
+          "traits": {
+            "smithy.api#documentation": "The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume."
+          }
+        },
+        "DataCompressionType": {
+          "target": "com.amazonaws.fsx#OpenZFSDataCompressionType",
+          "traits": {
+            "smithy.api#documentation": "Specifies the method used to compress the data on the volume. Unless the compression type is specified, volumes inherit the DataCompressionType value of their parent volume. NONE - Doesn't compress the data on the volume. ZSTD - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. This algorithm reduces the amount of space used on your volume and has very little impact on compute resources."
+          }
+        },
+        "CopyTagsToSnapshots": {
+          "target": "com.amazonaws.fsx#Flag",
+          "traits": {
+            "smithy.api#documentation": "A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true, and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value."
+          }
+        },
+        "OriginSnapshot": {
+          "target": "com.amazonaws.fsx#CreateOpenZFSOriginSnapshotConfiguration",
+          "traits": {
+            "smithy.api#documentation": "The configuration object that specifies the snapshot to use as the origin of the data for the volume."
+          }
+        },
+        "ReadOnly": {
+          "target": "com.amazonaws.fsx#ReadOnly",
+          "traits": {
+            "smithy.api#documentation": "A Boolean value indicating whether the volume is read-only."
+          }
+        },
+        "NfsExports": {
+          "target": "com.amazonaws.fsx#OpenZFSNfsExports",
+          "traits": {
+            "smithy.api#documentation": "The configuration object for mounting a Network File System (NFS) file system."
+          }
+        },
+        "UserAndGroupQuotas": {
+          "target": "com.amazonaws.fsx#OpenZFSUserAndGroupQuotas",
+          "traits": {
+            "smithy.api#documentation": "An object specifying how much storage users or groups can use on the volume."
+          }
+        }
+      },
+      "traits": {
+        "smithy.api#documentation": "Specifies the configuration of the OpenZFS volume that you are creating."
+      }
+    },

+    "com.amazonaws.fsx#CreateSnapshot": {
+      "type": "operation",
+      "input": {
+        "target": "com.amazonaws.fsx#CreateSnapshotRequest"
+      },
+      "output": {
+        "target": "com.amazonaws.fsx#CreateSnapshotResponse"
+      },
+      "errors": [
+        { "target": "com.amazonaws.fsx#BadRequest" },
+        { "target": "com.amazonaws.fsx#InternalServerError" },
+        { "target": "com.amazonaws.fsx#ServiceLimitExceeded" },
+        { "target": "com.amazonaws.fsx#VolumeNotFound" }
+      ],
+      "traits": {
+        "smithy.api#documentation": "Creates a snapshot of an existing Amazon FSx for OpenZFS file system. With snapshots, you can easily undo file changes and compare file versions by restoring the volume to a previous version. If a snapshot with the specified client request token exists, and the parameters match, this operation returns the description of the existing snapshot. If a snapshot with the specified client request token exists, and the parameters don't match, this operation returns IncompatibleParameterError. If a snapshot with the specified client request token doesn't exist, CreateSnapshot does the following: Creates a new OpenZFS snapshot with an assigned ID, and an initial lifecycle state of CREATING. Returns the description of the snapshot. By using the idempotent operation, you can retry a CreateSnapshot operation without the risk of creating an extra snapshot. This approach can be useful when an initial call fails in a way that makes it unclear whether a snapshot was created. If you use the same client request token and the initial call created a snapshot, the operation returns a successful result because all the parameters are the same. The CreateSnapshot operation returns while the snapshot's lifecycle state is still CREATING. You can check the snapshot creation status by calling the DescribeSnapshots operation, which returns the snapshot state along with other information.",
+        "smithy.api#idempotent": {}
+      }
+    },
+    "com.amazonaws.fsx#CreateSnapshotRequest": {
+      "type": "structure",
+      "members": {
+        "ClientRequestToken": {
+          "target": "com.amazonaws.fsx#ClientRequestToken",
+          "traits": {
+            "smithy.api#idempotencyToken": {}
+          }
+        },
+        "Name": {
+          "target": "com.amazonaws.fsx#SnapshotName",
+          "traits": {
+            "smithy.api#documentation": "The name of the snapshot.",
+            "smithy.api#required": {}
+          }
+        },
+        "VolumeId": {
+          "target": "com.amazonaws.fsx#VolumeId",
+          "traits": {
+            "smithy.api#documentation": "The ID of the volume that you are taking a snapshot of.",
+            "smithy.api#required": {}
+          }
+        },
+        "Tags": {
+          "target": "com.amazonaws.fsx#Tags"
+        }
+      }
+    },
+    "com.amazonaws.fsx#CreateSnapshotResponse": {
+      "type": "structure",
+      "members": {
+        "Snapshot": {
+          "target": "com.amazonaws.fsx#Snapshot",
+          "traits": {
+            "smithy.api#documentation": "A description of the snapshot."
+          }
+        }
+      }
+    },

+    "com.amazonaws.fsx#CreateStorageVirtualMachine": {
      "type": "operation",
      "input": {
        "target": "com.amazonaws.fsx#CreateStorageVirtualMachineRequest"
      },
      "output": {
        "target": "com.amazonaws.fsx#CreateStorageVirtualMachineResponse"
      },
      "errors": [
        { "target": "com.amazonaws.fsx#ActiveDirectoryError" },
        { "target": "com.amazonaws.fsx#BadRequest" },
        { "target": "com.amazonaws.fsx#FileSystemNotFound" },
        { "target": "com.amazonaws.fsx#IncompatibleParameterError" },
        { "target": "com.amazonaws.fsx#InternalServerError" },
        { "target": "com.amazonaws.fsx#ServiceLimitExceeded" },
        { "target": "com.amazonaws.fsx#UnsupportedOperation" }
      ],
      "traits": {
        "smithy.api#documentation": "Creates a storage virtual machine (SVM) for an Amazon FSx for ONTAP file system."
      }
    },
    "com.amazonaws.fsx#CreateStorageVirtualMachineRequest": {
      "type": "structure",
      "members": {
        "ActiveDirectoryConfiguration": {
          "target": "com.amazonaws.fsx#CreateSvmActiveDirectoryConfiguration",
          "traits": {
            "smithy.api#documentation": "Describes the self-managed Microsoft Active Directory to which you want to join the SVM. Joining an Active Directory provides user authentication and access control for SMB clients, including Microsoft Windows and macOS client accessing the file system."
          }
        }

@@ -1811,7 +2214,7 @@
      ],
      "traits": {
-        "smithy.api#documentation": "Creates an Amazon FSx for NetApp ONTAP storage volume.",
+        "smithy.api#documentation": "Creates an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS storage volume."
      }
    },
    "com.amazonaws.fsx#CreateVolumeFromBackup": {
@@ -1908,25 +2311,31 @@
      "VolumeType": {
        "target": "com.amazonaws.fsx#VolumeType",
        "traits": {
-          "smithy.api#documentation": "Specifies the type of volume to create; ONTAP is the only valid volume type.",
+          "smithy.api#documentation": "Specifies the type of volume to create; ONTAP and OPENZFS are the only valid volume types.",
          "smithy.api#required": {}
        }
      },
      "Name": {
        "target": "com.amazonaws.fsx#VolumeName",
        "traits": {
-          "smithy.api#documentation": "Specifies the name of the volume you're creating.",
+          "smithy.api#documentation": "Specifies the name of the volume that you're creating.",
          "smithy.api#required": {}
        }
      },
      "OntapConfiguration": {
        "target": "com.amazonaws.fsx#CreateOntapVolumeConfiguration",
        "traits": {
-          "smithy.api#documentation": "Specifies the ONTAP configuration to use in creating the volume."
+          "smithy.api#documentation": "Specifies the configuration to use when creating the ONTAP volume."
        }
      },
      "Tags": {
        "target": "com.amazonaws.fsx#Tags"
+      },
+      "OpenZFSConfiguration": {
+        "target": "com.amazonaws.fsx#CreateOpenZFSVolumeConfiguration",
+        "traits": {
+          "smithy.api#documentation": "Specifies the configuration to use when creating the OpenZFS volume."
+        }
      }
    }
  },

@@ -1984,13 +2393,124 @@
      ]
    }
  },
+    "com.amazonaws.fsx#DataRepositoryAssociation": {
+      "type": "structure",
+      "members": {
+        "AssociationId": {
+          "target": "com.amazonaws.fsx#DataRepositoryAssociationId",
+          "traits": {
+            "smithy.api#documentation": "The system-generated, unique ID of the data repository association."
+          }
+        },
+        "ResourceARN": {
+          "target": "com.amazonaws.fsx#ResourceARN"
+        },
+        "FileSystemId": {
+          "target": "com.amazonaws.fsx#FileSystemId"
+        },
+        "Lifecycle": {
+          "target": "com.amazonaws.fsx#DataRepositoryLifecycle",
+          "traits": {
+            "smithy.api#documentation": "Describes the state of a data repository association. The lifecycle can have the following values: CREATING - The data repository association between the FSx file system and the S3 data repository is being created. The data repository is unavailable. AVAILABLE - The data repository association is available for use. MISCONFIGURED - Amazon FSx cannot automatically import updates from the S3 bucket or automatically export updates to the S3 bucket until the data repository association configuration is corrected. UPDATING - The data repository association is undergoing a customer initiated update that might affect its availability. DELETING - The data repository association is undergoing a customer initiated deletion. FAILED - The data repository association is in a terminal state that cannot be recovered."
+          }
+        },
+        "FailureDetails": {
+          "target": "com.amazonaws.fsx#DataRepositoryFailureDetails"
+        },
+        "FileSystemPath": {
+          "target": "com.amazonaws.fsx#Namespace",
+          "traits": {
+            "smithy.api#documentation": "A path on the file system that points to a high-level directory (such as /ns1/) or subdirectory (such as /ns1/subdir/) that will be mapped 1-1 with DataRepositoryPath. The leading forward slash in the name is required. Two data repository associations cannot have overlapping file system paths. For example, if a data repository is associated with file system path /ns1/, then you cannot link another data repository with file system path /ns1/ns2. This path specifies where in your file system files will be exported from or imported to. This file system directory can be linked to only one Amazon S3 bucket, and no other S3 bucket can be linked to the directory."
+          }
+        },
+        "DataRepositoryPath": {
+          "target": "com.amazonaws.fsx#ArchivePath",
+          "traits": {
+            "smithy.api#documentation": "The path to the Amazon S3 data repository that will be linked to the file system. The path can be an S3 bucket or prefix in the format s3://myBucket/myPrefix/. This path specifies where in the S3 data repository files will be imported from or exported to."
+          }
+        },
+        "BatchImportMetaDataOnCreate": {
+          "target": "com.amazonaws.fsx#BatchImportMetaDataOnCreate",
+          "traits": {
+            "smithy.api#documentation": "A boolean flag indicating whether an import data repository task to import metadata should run after the data repository association is created. The task runs if this flag is set to true."
+          }
+        },
+        "ImportedFileChunkSize": {
+          "target": "com.amazonaws.fsx#Megabytes",
+          "traits": {
+            "smithy.api#documentation": "For files imported from a data repository, this value determines the stripe count and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single file can be striped across is limited by the total number of disks that make up the file system. The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). Amazon S3 objects have a maximum size of 5 TB."
+          }
+        },
+        "S3": {
+          "target": "com.amazonaws.fsx#S3DataRepositoryConfiguration",
+          "traits": {
+            "smithy.api#documentation": "The configuration for an Amazon S3 data repository linked to an Amazon FSx Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository."
+          }
+        },
+        "Tags": {
+          "target": "com.amazonaws.fsx#Tags"
+        },
+        "CreationTime": {
+          "target": "com.amazonaws.fsx#CreationTime"
+        }
+      },
+      "traits": {
+        "smithy.api#documentation": "The configuration of a data repository association that links an Amazon FSx for Lustre file system to an Amazon S3 bucket. The data repository association configuration object is returned in the response of the following operations: CreateDataRepositoryAssociation, UpdateDataRepositoryAssociation, DescribeDataRepositoryAssociations. Data repository associations are supported only for file systems with the Persistent_2 deployment type."
+      }
+    },
+    "com.amazonaws.fsx#DataRepositoryAssociationId": {
+      "type": "string",
+      "traits": {
+        "smithy.api#length": {
+          "min": 13,
+          "max": 23
+        },
+        "smithy.api#pattern": "^(dra-[0-9a-f]{8,})$"
+      }
+    },
+    "com.amazonaws.fsx#DataRepositoryAssociationIds": {
+      "type": "list",
+      "member": {
+        "target": "com.amazonaws.fsx#DataRepositoryAssociationId"
+      },
+      "traits": {
+        "smithy.api#length": {
+          "min": 0,
+          "max": 50
+        }
+      }
+    },
+    "com.amazonaws.fsx#DataRepositoryAssociationNotFound": {
+      "type": "structure",
+      "members": {
+        "Message": {
+          "target": "com.amazonaws.fsx#ErrorMessage"
+        }
+      },
+      "traits": {
+        "smithy.api#documentation": "No data repository associations were found based upon the supplied parameters.",
+        "smithy.api#error": "client"
+      }
+    },
+    "com.amazonaws.fsx#DataRepositoryAssociations": {
+      "type": "list",
+      "member": {
+        "target": "com.amazonaws.fsx#DataRepositoryAssociation"
+      },
+      "traits": {
+        "smithy.api#length": {
+          "min": 0,
+          "max": 100
+        }
+      }
    },

    "com.amazonaws.fsx#DataRepositoryConfiguration": {
      "type": "structure",
      "members": {
        "Lifecycle": {
          "target": "com.amazonaws.fsx#DataRepositoryLifecycle",
          "traits": {
-            "smithy.api#documentation": "Describes the state of the file system's S3 durable data repository, if it is configured with an S3 repository. The lifecycle can have the following values: CREATING - The data repository configuration between the FSx file system and the linked S3 data repository is being created. The data repository is unavailable. AVAILABLE - The data repository is available for use. MISCONFIGURED - Amazon FSx cannot automatically import updates from the S3 bucket until the data repository configuration is corrected. For more information, see Troubleshooting a Misconfigured linked S3 bucket. UPDATING - The data repository is undergoing a customer initiated update and availability may be impacted."
+            "smithy.api#documentation": "Describes the state of the file system's S3 durable data repository, if it is configured with an S3 repository. The lifecycle can have the following values: CREATING - The data repository configuration between the FSx file system and the linked S3 data repository is being created. The data repository is unavailable. AVAILABLE - The data repository is available for use. MISCONFIGURED - Amazon FSx cannot automatically import updates from the S3 bucket until the data repository configuration is corrected. For more information, see Troubleshooting a Misconfigured linked S3 bucket. UPDATING - The data repository is undergoing a customer initiated update and availability may be impacted. FAILED - The data repository is in a terminal state that cannot be recovered."
          }
        },

        "ImportPath": {
@@ -2014,7 +2534,7 @@
        "AutoImportPolicy": {
          "target": "com.amazonaws.fsx#AutoImportPolicyType",
          "traits": {
-            "smithy.api#documentation": "Describes the file system's linked S3 data repository's AutoImportPolicy. The AutoImportPolicy configures how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values: NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option. NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option. For more information, see Automatically import updates from your S3 bucket."
+            "smithy.api#documentation": "Describes the file system's linked S3 data repository's AutoImportPolicy. The AutoImportPolicy configures how Amazon FSx keeps your file and directory listings up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values: NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update file and directory listings for any new or changed objects after choosing this option. NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option. NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket."
          }
        },

        "FailureDetails": {
@@ -2022,7 +2542,7 @@
        }
      },
      "traits": {
-        "smithy.api#documentation": "The data repository configuration object for Lustre file systems returned in the response of the CreateFileSystem operation."
+        "smithy.api#documentation": "The data repository configuration object for Lustre file systems returned in the response of the CreateFileSystem operation. This data type is not supported for file systems with the Persistent_2 deployment type. Instead, use ."
      }
    },
    "com.amazonaws.fsx#DataRepositoryFailureDetails": {
@@ -2033,7 +2553,7 @@
      }
    },
      "traits": {
-        "smithy.api#documentation": "Provides detailed information about the data repository if its Lifecycle is set to MISCONFIGURED."
+        "smithy.api#documentation": "Provides detailed information about the data repository if its Lifecycle is set to MISCONFIGURED or FAILED."
      }
    },
    "com.amazonaws.fsx#DataRepositoryLifecycle": {
@@ -2059,6 +2579,10 @@
        {
          "value": "DELETING",
          "name": "DELETING"
+        },
+        {
+          "value": "FAILED",
+          "name": "FAILED"
        }
      ]
    }
  },
@@ -2083,7 +2607,7 @@
      "Type": {
        "target": "com.amazonaws.fsx#DataRepositoryTaskType",
        "traits": {
-          "smithy.api#documentation": "The type of data repository task; EXPORT_TO_REPOSITORY is the only type currently supported.",
+          "smithy.api#documentation": "The type of data repository task. The EXPORT_TO_REPOSITORY data repository task exports from your Lustre file system to a linked S3 bucket. The IMPORT_METADATA_FROM_REPOSITORY data repository task imports metadata changes from a linked S3 bucket to your Lustre file system.",
          "smithy.api#required": {}
        }
      },
@@ -2140,7 +2664,7 @@
      }
    },
      "traits": {
-        "smithy.api#documentation": "A description of the data repository task. You use data repository tasks to perform bulk transfer operations between your Amazon FSx file system and its linked data repository."
+        "smithy.api#documentation": "A description of the data repository task. You use data repository tasks to perform bulk transfer operations between your Amazon FSx file system and a linked data repository."
      }
    },

    "com.amazonaws.fsx#DataRepositoryTaskEnded": {
@@ -2209,6 +2733,10 @@
        {
          "value": "task-lifecycle",
          "name": "TASK_LIFECYCLE"
+        },
+        {
+          "value": "data-repository-association-id",
+          "name": "DATA_REPO_ASSOCIATION_ID"
        }
      ]
    }
@@ -2351,6 +2879,10 @@
        {
          "value": "EXPORT_TO_REPOSITORY",
          "name": "EXPORT"
+        },
+        {
+          "value": "IMPORT_METADATA_FROM_REPOSITORY",
+          "name": "IMPORT"
        }
      ]
    }
@@ -2399,7 +2931,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Deletes an Amazon FSx backup, deleting its contents. After deletion, the backup no longer exists, and its data is gone. The DeleteBackup call returns instantly. The backup will not show up in later DescribeBackups calls. The data in a deleted backup is also deleted and can't be recovered by any means.",
+      "smithy.api#documentation": "Deletes an Amazon FSx backup. After deletion, the backup no longer exists, and its data is gone. The DeleteBackup call returns instantly. The backup won't show up in later DescribeBackups calls. The data in a deleted backup is also deleted and can't be recovered by any means.",
      "smithy.api#idempotent": {}
    }
  },
@@ -2409,20 +2941,20 @@
      "BackupId": {
        "target": "com.amazonaws.fsx#BackupId",
        "traits": {
-          "smithy.api#documentation": "The ID of the backup you want to delete.",
+          "smithy.api#documentation": "The ID of the backup that you want to delete.",
          "smithy.api#required": {}
        }
      },
      "ClientRequestToken": {
        "target": "com.amazonaws.fsx#ClientRequestToken",
        "traits": {
-          "smithy.api#documentation": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This is automatically filled on your behalf when using the CLI or SDK.",
+          "smithy.api#documentation": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent deletion. This parameter is automatically filled on your behalf when using the CLI or SDK.",
          "smithy.api#idempotencyToken": {}
        }
      }
    },
    "traits": {
-      "smithy.api#documentation": "The request object for DeleteBackup operation."
+      "smithy.api#documentation": "The request object for the DeleteBackup operation."
    }
  },
  "com.amazonaws.fsx#DeleteBackupResponse": {
@@ -2431,34 +2963,40 @@
      "BackupId": {
        "target": "com.amazonaws.fsx#BackupId",
        "traits": {
-          "smithy.api#documentation": "The ID of the backup deleted."
+          "smithy.api#documentation": "The ID of the backup that was deleted."
        }
      },
      "Lifecycle": {
        "target": "com.amazonaws.fsx#BackupLifecycle",
        "traits": {
-          "smithy.api#documentation": "The lifecycle of the backup. Should be DELETED."
+          "smithy.api#documentation": "The lifecycle status of the backup. If the DeleteBackup operation is successful, the status is DELETED."
        }
      }
    },
    "traits": {
-      "smithy.api#documentation": "The response object for DeleteBackup operation."
+      "smithy.api#documentation": "The response object for the DeleteBackup operation."
    }
  },

-    "com.amazonaws.fsx#DeleteFileSystem": {
+    "com.amazonaws.fsx#DeleteDataInFileSystem": {
+      "type": "boolean",
+      "traits": {
+        "smithy.api#box": {}
+      }
+    },
+    "com.amazonaws.fsx#DeleteDataRepositoryAssociation": {
      "type": "operation",
      "input": {
-        "target": "com.amazonaws.fsx#DeleteFileSystemRequest"
+        "target": "com.amazonaws.fsx#DeleteDataRepositoryAssociationRequest"
      },
      "output": {
-        "target": "com.amazonaws.fsx#DeleteFileSystemResponse"
+        "target": "com.amazonaws.fsx#DeleteDataRepositoryAssociationResponse"
      },
      "errors": [
        {
          "target": "com.amazonaws.fsx#BadRequest"
        },
        {
-          "target": "com.amazonaws.fsx#FileSystemNotFound"
+          "target": "com.amazonaws.fsx#DataRepositoryAssociationNotFound"
        },
        {
          "target": "com.amazonaws.fsx#IncompatibleParameterError"
@@ -2471,42 +3009,120 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "Deletes a file system, deleting its contents. After deletion, the file system no longer exists, and its data is gone. Any existing automatic backups will also be deleted. To delete an Amazon FSx for NetApp ONTAP file system, first delete all the volumes and SVMs on the file system. Then provide a FileSystemId value to the DeleFileSystem operation. By default, when you delete an Amazon FSx for Windows File Server file system, a final backup is created upon deletion. This final backup is not subject to the file system's retention policy, and must be manually deleted. The DeleteFileSystem action returns while the file system has the DELETING status. You can check the file system deletion status by calling the DescribeFileSystems action, which returns a list of file systems in your account. If you pass the file system ID for a deleted file system, the DescribeFileSystems returns a FileSystemNotFound error. Deleting an Amazon FSx for Lustre file system will fail with a 400 BadRequest if a data repository task is in a PENDING or EXECUTING state. The data in a deleted file system is also deleted and can't be recovered by any means.",
+        "smithy.api#documentation": "Deletes a data repository association on an Amazon FSx for Lustre file system. Deleting the data repository association unlinks the file system from the Amazon S3 bucket. When deleting a data repository association, you have the option of deleting the data in the file system that corresponds to the data repository association. Data repository associations are supported only for file systems with the Persistent_2 deployment type.",
        "smithy.api#idempotent": {}
      }
    },

                                                                      Set SkipFinalBackup to false if you want to take a final backup of the file \n system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the\n DeleteFileSystem operation is invoked. (Default = true)

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the data repository association that you want to delete.

                                                                      ", + "smithy.api#required": {} } }, - "FinalBackupTags": { - "target": "com.amazonaws.fsx#Tags", + "ClientRequestToken": { + "target": "com.amazonaws.fsx#ClientRequestToken", "traits": { - "smithy.api#documentation": "

                                                                      Use if SkipFinalBackup is set to false, \n and you want to apply an array of tags to the final backup. If you have set the file system property\n CopyTagsToBackups to true, and \n you specify one or more FinalBackupTags when deleting a file system, Amazon FSx will not copy any existing file system tags to the backup.

                                                                      " + "smithy.api#idempotencyToken": {} + } + }, + "DeleteDataInFileSystem": { + "target": "com.amazonaws.fsx#DeleteDataInFileSystem", + "traits": { + "smithy.api#documentation": "

                                                                      Set to true to delete the data in the file system that corresponds\n to the data repository association.

                                                                      ", + "smithy.api#required": {} } } - }, - "traits": { - "smithy.api#documentation": "

                                                                      The configuration object for the Amazon FSx for Lustre file system being deleted in the\n DeleteFileSystem operation.

                                                                      " } }, - "com.amazonaws.fsx#DeleteFileSystemLustreResponse": { + "com.amazonaws.fsx#DeleteDataRepositoryAssociationResponse": { "type": "structure", "members": { - "FinalBackupId": { - "target": "com.amazonaws.fsx#BackupId", + "AssociationId": { + "target": "com.amazonaws.fsx#DataRepositoryAssociationId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the final backup for this file system.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the data repository association being deleted.

                                                                      " } }, - "FinalBackupTags": { - "target": "com.amazonaws.fsx#Tags", - "traits": { + "Lifecycle": { + "target": "com.amazonaws.fsx#DataRepositoryLifecycle", + "traits": { + "smithy.api#documentation": "

                                                                      Describes the lifecycle state of the data repository association being deleted.

                                                                      " + } + }, + "DeleteDataInFileSystem": { + "target": "com.amazonaws.fsx#DeleteDataInFileSystem", + "traits": { + "smithy.api#documentation": "

                                                                      Indicates whether data in the file system that corresponds to the data\n repository association is being deleted. Default is false.

                                                                      " + } + } + } + }, + "com.amazonaws.fsx#DeleteFileSystem": { + "type": "operation", + "input": { + "target": "com.amazonaws.fsx#DeleteFileSystemRequest" + }, + "output": { + "target": "com.amazonaws.fsx#DeleteFileSystemResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fsx#BadRequest" + }, + { + "target": "com.amazonaws.fsx#FileSystemNotFound" + }, + { + "target": "com.amazonaws.fsx#IncompatibleParameterError" + }, + { + "target": "com.amazonaws.fsx#InternalServerError" + }, + { + "target": "com.amazonaws.fsx#ServiceLimitExceeded" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a file system. After deletion, the file system no longer exists, and its data\n is gone. Any existing automatic backups and snapshots are also deleted.

                                                                      \n

                                                                      To delete an Amazon FSx for NetApp ONTAP file system, first delete all the\n volumes and storage virtual machines (SVMs) on the file system. Then provide a\n FileSystemId value to the DeleFileSystem operation.

                                                                      \n \n

                                                                      By default, when you delete an Amazon FSx for Windows File Server file system,\n a final backup is created upon deletion. This final backup isn't subject to the file\n system's retention policy, and must be manually deleted.

                                                                      \n\n

                                                                      The DeleteFileSystem operation returns while the file system has the\n DELETING status. You can check the file system deletion status by\n calling the DescribeFileSystems operation, which returns a list of file systems in your\n account. If you pass the file system ID for a deleted file system, the\n DescribeFileSystems operation returns a FileSystemNotFound\n error.

                                                                      \n \n

                                                                      If a data repository task is in a PENDING or EXECUTING state,\n deleting an Amazon FSx for Lustre file system will fail with an HTTP status\n code 400 (Bad Request).

                                                                      \n
                                                                      \n \n

                                                                      The data in a deleted file system is also deleted and can't be recovered by\n any means.

                                                                      \n
                                                                      ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.fsx#DeleteFileSystemLustreConfiguration": { + "type": "structure", + "members": { + "SkipFinalBackup": { + "target": "com.amazonaws.fsx#Flag", + "traits": { + "smithy.api#documentation": "

                                                                      Set SkipFinalBackup to false if you want to take a final backup of the file \n system you are deleting. By default, Amazon FSx will not take a final backup on your behalf when the\n DeleteFileSystem operation is invoked. (Default = true)

                                                                      \n \n

                                                                      The fsx:CreateBackup permission is required if you set SkipFinalBackup to false in order to delete the file system and take a final backup.

                                                                      \n
                                                                      " + } + }, + "FinalBackupTags": { + "target": "com.amazonaws.fsx#Tags", + "traits": { + "smithy.api#documentation": "

                                                                      Use if SkipFinalBackup is set to false, \n and you want to apply an array of tags to the final backup. If you have set the file system property\n CopyTagsToBackups to true, and \n you specify one or more FinalBackupTags when deleting a file system, Amazon FSx will not copy any existing file system tags to the backup.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The configuration object for the Amazon FSx for Lustre file system being deleted in the\n DeleteFileSystem operation.

                                                                      " + } + }, + "com.amazonaws.fsx#DeleteFileSystemLustreResponse": { + "type": "structure", + "members": { + "FinalBackupId": { + "target": "com.amazonaws.fsx#BackupId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the final backup for this file system.

                                                                      " + } + }, + "FinalBackupTags": { + "target": "com.amazonaws.fsx#Tags", + "traits": { "smithy.api#documentation": "

                                                                      The set of tags applied to the final backup.

                                                                      " } } @@ -2515,20 +3131,51 @@ "smithy.api#documentation": "

                                                                      The response object for the Amazon FSx for Lustre file system being deleted in the\n DeleteFileSystem operation.

                                                                      " } }, + "com.amazonaws.fsx#DeleteFileSystemOpenZFSConfiguration": { + "type": "structure", + "members": { + "SkipFinalBackup": { + "target": "com.amazonaws.fsx#Flag", + "traits": { + "smithy.api#documentation": "

                                                                      By default, Amazon FSx for OpenZFS takes a final backup on your behalf when\n the DeleteFileSystem operation is invoked. Doing this helps protect you\n from data loss, and we highly recommend taking the final backup. If you want to skip\n this backup, use this\n value\n to do so.

                                                                      " + } + }, + "FinalBackupTags": { + "target": "com.amazonaws.fsx#Tags" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The configuration object for the OpenZFS file system used in the\n DeleteFileSystem operation.

                                                                      " + } + }, + "com.amazonaws.fsx#DeleteFileSystemOpenZFSResponse": { + "type": "structure", + "members": { + "FinalBackupId": { + "target": "com.amazonaws.fsx#BackupId" + }, + "FinalBackupTags": { + "target": "com.amazonaws.fsx#Tags" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The response object for the Amazon FSx for OpenZFS file system that's being\n deleted in the DeleteFileSystem operation.

                                                                      " + } + }, "com.amazonaws.fsx#DeleteFileSystemRequest": { "type": "structure", "members": { "FileSystemId": { "target": "com.amazonaws.fsx#FileSystemId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the file system you want to delete.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID of the file system that you want to delete.

                                                                      ", "smithy.api#required": {} } }, "ClientRequestToken": { "target": "com.amazonaws.fsx#ClientRequestToken", "traits": { - "smithy.api#documentation": "

                                                                      A string of up to 64 ASCII characters that Amazon FSx uses to ensure\n idempotent deletion. This is automatically filled on your behalf when using the\n Command Line Interface (CLI) or an Amazon Web Services SDK.

                                                                      ", + "smithy.api#documentation": "

                                                                      A string of up to 64 ASCII characters that Amazon FSx uses to ensure\n idempotent deletion. This token is automatically filled on your behalf when using the\n Command Line Interface (CLI) or an Amazon Web Services SDK.

                                                                      ", "smithy.api#idempotencyToken": {} } }, @@ -2537,6 +3184,12 @@ }, "LustreConfiguration": { "target": "com.amazonaws.fsx#DeleteFileSystemLustreConfiguration" + }, + "OpenZFSConfiguration": { + "target": "com.amazonaws.fsx#DeleteFileSystemOpenZFSConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      The configuration object for the OpenZFS file system used in the\n DeleteFileSystem operation.

                                                                      " + } } }, "traits": { @@ -2549,13 +3202,13 @@ "FileSystemId": { "target": "com.amazonaws.fsx#FileSystemId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the file system being deleted.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the file system that's being deleted.

                                                                      " } }, "Lifecycle": { "target": "com.amazonaws.fsx#FileSystemLifecycle", "traits": { - "smithy.api#documentation": "

                                                                      The file system lifecycle for the deletion request. Should be\n DELETING.

                                                                      " + "smithy.api#documentation": "

                                                                      The file system lifecycle for the deletion request. If the\n DeleteFileSystem operation is successful, this status is\n DELETING.

                                                                      " } }, "WindowsResponse": { @@ -2563,6 +3216,12 @@ }, "LustreResponse": { "target": "com.amazonaws.fsx#DeleteFileSystemLustreResponse" + }, + "OpenZFSResponse": { + "target": "com.amazonaws.fsx#DeleteFileSystemOpenZFSResponse", + "traits": { + "smithy.api#documentation": "

                                                                      The response object for the OpenZFS file system that's being deleted in the\n DeleteFileSystem operation.

                                                                      " + } } }, "traits": { @@ -2609,6 +3268,88 @@ "smithy.api#documentation": "

                                                                      The response object for the Microsoft Windows file system used in the\n DeleteFileSystem operation.

                                                                      " } }, + "com.amazonaws.fsx#DeleteOpenZFSVolumeOption": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS", + "name": "DELETE_CHILD_VOLUMES_AND_SNAPSHOTS" + } + ] + } + }, + "com.amazonaws.fsx#DeleteOpenZFSVolumeOptions": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#DeleteOpenZFSVolumeOption" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1 + } + } + }, + "com.amazonaws.fsx#DeleteSnapshot": { + "type": "operation", + "input": { + "target": "com.amazonaws.fsx#DeleteSnapshotRequest" + }, + "output": { + "target": "com.amazonaws.fsx#DeleteSnapshotResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fsx#BadRequest" + }, + { + "target": "com.amazonaws.fsx#InternalServerError" + }, + { + "target": "com.amazonaws.fsx#SnapshotNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes the Amazon FSx snapshot. After deletion, the snapshot no longer\n exists, and its data is gone. Deleting a snapshot doesn't affect snapshots stored in a\n file system backup.

                                                                      \n

                                                                      The DeleteSnapshot operation returns instantly. The snapshot appears with\n the lifecycle status of DELETING until the deletion is complete.

                                                                      ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.fsx#DeleteSnapshotRequest": { + "type": "structure", + "members": { + "ClientRequestToken": { + "target": "com.amazonaws.fsx#ClientRequestToken", + "traits": { + "smithy.api#idempotencyToken": {} + } + }, + "SnapshotId": { + "target": "com.amazonaws.fsx#SnapshotId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the snapshot that you want to delete.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.fsx#DeleteSnapshotResponse": { + "type": "structure", + "members": { + "SnapshotId": { + "target": "com.amazonaws.fsx#SnapshotId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the deleted snapshot.

                                                                      " + } + }, + "Lifecycle": { + "target": "com.amazonaws.fsx#SnapshotLifecycle", + "traits": { + "smithy.api#documentation": "

                                                                      The lifecycle status of the snapshot. If the DeleteSnapshot operation is\n successful, this status is DELETING.

                                                                      " + } + } + } + }, "com.amazonaws.fsx#DeleteStorageVirtualMachine": { "type": "operation", "input": { @@ -2693,7 +3434,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Deletes an Amazon FSx for NetApp ONTAP volume. When deleting a volume, \n you have the option of creating a final backup. If you create a final backup, you have the option to \n apply Tags to the backup. You need to have fsx:TagResource \n permission in order to apply tags to the backup.

                                                                      " + "smithy.api#documentation": "

                                                                      Deletes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS\n volume.

                                                                      " } }, "com.amazonaws.fsx#DeleteVolumeOntapConfiguration": { @@ -2727,6 +3468,20 @@ "smithy.api#documentation": "

                                                                      The response object for the Amazon FSx for NetApp ONTAP volume being deleted\n in the DeleteVolume operation.

                                                                      " } }, + "com.amazonaws.fsx#DeleteVolumeOpenZFSConfiguration": { + "type": "structure", + "members": { + "Options": { + "target": "com.amazonaws.fsx#DeleteOpenZFSVolumeOptions", + "traits": { + "smithy.api#documentation": "

                                                                      To delete the volume's children and snapshots, use the string\n DELETE_CHILD_VOLUMES_AND_SNAPSHOTS.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A value that specifies whether to delete all child volumes and snapshots.

                                                                      " + } + }, "com.amazonaws.fsx#DeleteVolumeRequest": { "type": "structure", "members": { @@ -2739,14 +3494,20 @@ "VolumeId": { "target": "com.amazonaws.fsx#VolumeId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the volume you are deleting.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID of the volume that you are deleting.

                                                                      ", "smithy.api#required": {} } }, "OntapConfiguration": { "target": "com.amazonaws.fsx#DeleteVolumeOntapConfiguration", "traits": { - "smithy.api#documentation": "

                                                                      For Amazon FSx for ONTAP volumes, specify whether to take \n a final backup of the volume, and apply tags to the backup.

                                                                      " + "smithy.api#documentation": "

                                                                      For Amazon FSx for ONTAP volumes, specify whether to take a final backup of\n the volume and apply tags to the backup. To apply tags to the backup, you must have the\n fsx:TagResource permission.

                                                                      " + } + }, + "OpenZFSConfiguration": { + "target": "com.amazonaws.fsx#DeleteVolumeOpenZFSConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      For Amazon FSx for OpenZFS volumes, specify whether to delete all child\n volumes and snapshots.

                                                                      " } } } @@ -2757,19 +3518,19 @@ "VolumeId": { "target": "com.amazonaws.fsx#VolumeId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the volume being deleted.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the volume that's being deleted.

                                                                      " } }, "Lifecycle": { "target": "com.amazonaws.fsx#VolumeLifecycle", "traits": { - "smithy.api#documentation": "

                                                                      Describes the lifecycle state of the volume being deleted.

                                                                      " + "smithy.api#documentation": "

                                                                      The lifecycle state of the volume being deleted. If the DeleteVolume\n operation is successful, this value is DELETING.

                                                                      " } }, "OntapResponse": { "target": "com.amazonaws.fsx#DeleteVolumeOntapResponse", "traits": { - "smithy.api#documentation": "

                                                                      Returned after a DeleteVolume request, showing the status of the delete request.\n

                                                                      " + "smithy.api#documentation": "

                                                                      Returned after a DeleteVolume request, showing the status of the delete\n request.

                                                                      " } } } @@ -2800,7 +3561,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns the description of specific Amazon FSx backups, if\n a BackupIds value is provided for that backup. Otherwise, it returns all\n backups owned by your Amazon Web Services account in the Amazon Web Services Region\n of the endpoint that you're calling.

                                                                      \n\n

                                                                      When retrieving all backups, you can optionally specify the MaxResults\n parameter to limit the number of backups in a response. If more backups remain, Amazon\n FSx returns a NextToken value in the response. In this case, send a later\n request with the NextToken request parameter set to the value of\n NextToken from the last response.

                                                                      \n\n

                                                                      This action is used in an iterative process to retrieve a list of your backups.\n DescribeBackups is called first without a NextTokenvalue.\n Then the action continues to be called with the NextToken parameter set to\n the value of the last NextToken value until a response has no\n NextToken.

                                                                      \n\n

                                                                      When using this action, keep the following in mind:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        The implementation might return fewer than MaxResults\n backup descriptions while still including a NextToken\n value.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        The order of backups returned in the response of one\n DescribeBackups call and the order of backups returned across\n the responses of a multi-call iteration is unspecified.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#documentation": "

                                                                      Returns the description of a specific Amazon FSx backup, if a\n BackupIds value is provided for that backup. Otherwise, it returns all\n backups owned by your Amazon Web Services account in the Amazon Web Services Region of the\n endpoint that you're calling.

                                                                      \n\n

                                                                      When retrieving all backups, you can optionally specify the MaxResults\n parameter to limit the number of backups in a response. If more backups remain, Amazon FSx returns a NextToken value in the response. In this case,\n send a later request with the NextToken request parameter set to the value\n of the NextToken value from the last response.

                                                                      \n\n

                                                                      This operation is used in an iterative process to retrieve a list of your backups.\n DescribeBackups is called first without a NextToken value.\n Then the operation continues to be called with the NextToken parameter set\n to the value of the last NextToken value until a response has no\n NextToken value.

                                                                      \n\n

                                                                      When using this operation, keep the following in mind:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        The operation might return fewer than the MaxResults value of\n backup descriptions while still including a NextToken\n value.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        The order of the backups returned in the response of one\n DescribeBackups call and the order of the backups returned\n across the responses of a multi-call iteration is unspecified.

                                                                        \n
                                                                      • \n
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -2814,30 +3575,30 @@ "BackupIds": { "target": "com.amazonaws.fsx#BackupIds", "traits": { - "smithy.api#documentation": "

                                                                      IDs of the backups you want to retrieve (String). This overrides any\n filters. If any IDs are not found, BackupNotFound will be thrown.

                                                                      " + "smithy.api#documentation": "

                                                                      The IDs of the backups that you want to retrieve. This parameter value overrides any\n filters. If any IDs aren't found, a BackupNotFound error occurs.

                                                                      " } }, "Filters": { "target": "com.amazonaws.fsx#Filters", "traits": { - "smithy.api#documentation": "

                                                                      Filters structure. Supported names are file-system-id,\n backup-type, file-system-type, and\n volume-id.

                                                                      " + "smithy.api#documentation": "

                                                                      The filters structure. The supported names are file-system-id,\n backup-type, file-system-type, and\n volume-id.

                                                                      " } }, "MaxResults": { "target": "com.amazonaws.fsx#MaxResults", "traits": { - "smithy.api#documentation": "

                                                                      Maximum number of backups to return in the response (integer). This\n parameter value must be greater than 0. The number of items that Amazon FSx returns is\n the minimum of the MaxResults parameter specified in the request and the\n service's internal maximum number of items per page.

                                                                      " + "smithy.api#documentation": "

                                                                      Maximum number of backups to return in the response. This parameter value must be\n greater than 0. The number of items that Amazon FSx returns is the minimum of\n the MaxResults parameter specified in the request and the service's\n internal maximum number of items per page.

                                                                      " } }, "NextToken": { "target": "com.amazonaws.fsx#NextToken", "traits": { - "smithy.api#documentation": "

                                                                      Opaque pagination token returned from a previous\n DescribeBackups operation (String). If a token present, the action\n continues the list from where the returning call left off.

                                                                      " + "smithy.api#documentation": "

                                                                      An opaque pagination token returned from a previous DescribeBackups\n operation. If a token is present, the operation continues the list from where the\n returning call left off.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      The request object for DescribeBackups operation.

                                                                      " + "smithy.api#documentation": "

                                                                      The request object for the DescribeBackups operation.

                                                                      " } }, "com.amazonaws.fsx#DescribeBackupsResponse": { @@ -2852,12 +3613,84 @@ "NextToken": { "target": "com.amazonaws.fsx#NextToken", "traits": { - "smithy.api#documentation": "

                                                                      This is present if there are more backups than returned in the response (String).\n You can use the NextToken value in the later request to fetch the backups.\n

                                                                      " + "smithy.api#documentation": "

                                                                      A NextToken value is present if there are more backups than returned in\n the response. You can use the NextToken value in the subsequent request to\n fetch the backups.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      Response object for DescribeBackups operation.

                                                                      " + "smithy.api#documentation": "

                                                                      Response object for the DescribeBackups operation.

                                                                      " + } + }, + "com.amazonaws.fsx#DescribeDataRepositoryAssociations": { + "type": "operation", + "input": { + "target": "com.amazonaws.fsx#DescribeDataRepositoryAssociationsRequest" + }, + "output": { + "target": "com.amazonaws.fsx#DescribeDataRepositoryAssociationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fsx#BadRequest" + }, + { + "target": "com.amazonaws.fsx#DataRepositoryAssociationNotFound" + }, + { + "target": "com.amazonaws.fsx#FileSystemNotFound" + }, + { + "target": "com.amazonaws.fsx#InternalServerError" + }, + { + "target": "com.amazonaws.fsx#InvalidDataRepositoryType" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns the description of specific Amazon FSx for Lustre data repository associations, if\n one or more AssociationIds values are provided in the request, or if filters are\n used in the request. Data repository associations are supported only\n for file systems with the Persistent_2 deployment type.

                                                                      \n \n

                                                                      You can use filters to narrow the response to include just data repository\n associations for specific file systems (use the file-system-id filter with\n the ID of the file system) or data repository associations for a specific repository type\n (use the data-repository-type filter with a value of S3).\n If you don't use filters, the response returns all data repository associations\n owned by your Amazon Web Services account in the Amazon Web Services Region of the endpoint\n that you're calling.

                                                                      \n \n

                                                                      When retrieving all data repository associations, you can paginate the response by using\n the optional MaxResults parameter to limit the number of data repository associations\n returned in a response. If more data repository associations remain, Amazon FSx returns a\n NextToken value in the response. In this case, send a later\n request with the NextToken request parameter set to the value of\n NextToken from the last response.

                                                                      ", + "smithy.api#idempotent": {}, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.fsx#DescribeDataRepositoryAssociationsRequest": { + "type": "structure", + "members": { + "AssociationIds": { + "target": "com.amazonaws.fsx#DataRepositoryAssociationIds", + "traits": { + "smithy.api#documentation": "

                                                                      IDs of the data repository associations whose descriptions you want to retrieve\n (String).

                                                                      " + } + }, + "Filters": { + "target": "com.amazonaws.fsx#Filters" + }, + "MaxResults": { + "target": "com.amazonaws.fsx#LimitedMaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of resources to return in the response. This value must be\n an integer greater than zero.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.fsx#NextToken" + } + } + }, + "com.amazonaws.fsx#DescribeDataRepositoryAssociationsResponse": { + "type": "structure", + "members": { + "Associations": { + "target": "com.amazonaws.fsx#DataRepositoryAssociations", + "traits": { + "smithy.api#documentation": "

                                                                      An array of one ore more data repository association descriptions.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.fsx#NextToken" + } } }, "com.amazonaws.fsx#DescribeDataRepositoryTasks": { @@ -3029,7 +3862,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns the description of specific Amazon FSx file systems, if a\n FileSystemIds value is provided for that file system. Otherwise, it\n returns descriptions of all file systems owned by your Amazon Web Services account in\n the Amazon Web Services Region of the endpoint that you're calling.

                                                                      \n\n

                                                                      When retrieving all file system descriptions, you can optionally specify the\n MaxResults parameter to limit the number of descriptions in a response.\n If more file system descriptions remain, Amazon FSx returns a NextToken\n value in the response. In this case, send a later request with the\n NextToken request parameter set to the value of NextToken\n from the last response.

                                                                      \n\n

                                                                      This action is used in an iterative process to retrieve a list of your file system\n descriptions. DescribeFileSystems is called first without a\n NextTokenvalue. Then the action continues to be called with the\n NextToken parameter set to the value of the last NextToken\n value until a response has no NextToken.

                                                                      \n\n

                                                                      When using this action, keep the following in mind:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        The implementation might return fewer than MaxResults file\n system descriptions while still including a NextToken\n value.

                                                                        \n\n
                                                                      • \n
                                                                      • \n

                                                                        The order of file systems returned in the response of one\n DescribeFileSystems call and the order of file systems returned\n across the responses of a multicall iteration is unspecified.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#documentation": "

                                                                      Returns the description of specific Amazon FSx file systems, if a\n FileSystemIds value is provided for that file system. Otherwise, it\n returns descriptions of all file systems owned by your Amazon Web Services account in the\n Amazon Web Services Region of the endpoint that you're calling.

                                                                      \n\n

                                                                      When retrieving all file system descriptions, you can optionally specify the\n MaxResults parameter to limit the number of descriptions in a response.\n If more file system descriptions remain, Amazon FSx returns a\n NextToken value in the response. In this case, send a later request\n with the NextToken request parameter set to the value of\n NextToken from the last response.

                                                                      \n\n

                                                                      This operation is used in an iterative process to retrieve a list of your file system\n descriptions. DescribeFileSystems is called first without a\n NextTokenvalue. Then the operation continues to be called with the\n NextToken parameter set to the value of the last NextToken\n value until a response has no NextToken.

                                                                      \n\n

                                                                      When using this operation, keep the following in mind:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        The implementation might return fewer than MaxResults file\n system descriptions while still including a NextToken\n value.

                                                                        \n\n
                                                                      • \n
                                                                      • \n

                                                                        The order of file systems returned in the response of one\n DescribeFileSystems call and the order of file systems returned\n across the responses of a multicall iteration is unspecified.

                                                                        \n
                                                                      • \n
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3055,7 +3888,7 @@ "NextToken": { "target": "com.amazonaws.fsx#NextToken", "traits": { - "smithy.api#documentation": "

                                                                      Opaque pagination token returned from a previous\n DescribeFileSystems operation (String). If a token present, the action\n continues the list from where the returning call left off.

                                                                      " + "smithy.api#documentation": "

                                                                      Opaque pagination token returned from a previous DescribeFileSystems\n operation (String). If a token present, the operation continues the list from where the\n returning call left off.

                                                                      " } } }, @@ -3083,13 +3916,13 @@ "smithy.api#documentation": "

                                                                      The response object for DescribeFileSystems operation.

                                                                      " } }, - "com.amazonaws.fsx#DescribeStorageVirtualMachines": { + "com.amazonaws.fsx#DescribeSnapshots": { "type": "operation", "input": { - "target": "com.amazonaws.fsx#DescribeStorageVirtualMachinesRequest" + "target": "com.amazonaws.fsx#DescribeSnapshotsRequest" }, "output": { - "target": "com.amazonaws.fsx#DescribeStorageVirtualMachinesResponse" + "target": "com.amazonaws.fsx#DescribeSnapshotsResponse" }, "errors": [ { @@ -3099,11 +3932,11 @@ "target": "com.amazonaws.fsx#InternalServerError" }, { - "target": "com.amazonaws.fsx#StorageVirtualMachineNotFound" + "target": "com.amazonaws.fsx#SnapshotNotFound" } ], "traits": { - "smithy.api#documentation": "

                                                                      Describes one or more Amazon FSx for NetApp ONTAP storage virtual machines (SVMs).

                                                                      ", + "smithy.api#documentation": "

                                                                      Returns the description of specific Amazon FSx snapshots, if a\n SnapshotIds value is provided. Otherwise, this operation returns all\n snapshots owned by your Amazon Web Services account in the Amazon Web Services Region of\n the endpoint that you're calling.

                                                                      \n

                                                                      When retrieving all snapshots, you can optionally specify the MaxResults\n parameter to limit the number of snapshots in a response. If more backups remain,\n Amazon FSx returns a NextToken value in the response. In this\n case, send a later request with the NextToken request parameter set to the\n value of NextToken from the last response.

                                                                      \n

                                                                      Use this operation in an iterative process to retrieve a list of your snapshots.\n DescribeSnapshots is called first without a NextToken\n value. Then the operation continues to be called with the NextToken\n parameter set to the value of the last NextToken value until a response has\n no NextToken value.

                                                                      \n

                                                                      When using this operation, keep the following in mind:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        The operation might return fewer than the MaxResults value of\n snapshot descriptions while still including a NextToken\n value.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        The order of snapshots returned in the response of one\n DescribeSnapshots call and the order of backups returned across\n the responses of a multi-call iteration is unspecified.

                                                                        \n
                                                                      • \n
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3111,19 +3944,19 @@ } } }, - "com.amazonaws.fsx#DescribeStorageVirtualMachinesRequest": { + "com.amazonaws.fsx#DescribeSnapshotsRequest": { "type": "structure", "members": { - "StorageVirtualMachineIds": { - "target": "com.amazonaws.fsx#StorageVirtualMachineIds", + "SnapshotIds": { + "target": "com.amazonaws.fsx#SnapshotIds", "traits": { - "smithy.api#documentation": "

                                                                      Enter the ID of one or more SVMs that you want to view.

                                                                      " + "smithy.api#documentation": "

                                                                      The IDs of the snapshots that you want to retrieve. This parameter value overrides any\n filters. If any IDs aren't found, a SnapshotNotFound error occurs.

                                                                      " } }, "Filters": { - "target": "com.amazonaws.fsx#StorageVirtualMachineFilters", + "target": "com.amazonaws.fsx#SnapshotFilters", "traits": { - "smithy.api#documentation": "

                                                                      Enter a filter name:value pair to view a select set of SVMs.

                                                                      " + "smithy.api#documentation": "

                                                                      The filters structure. The supported names are file-system-id or\n volume-id.

                                                                      " } }, "MaxResults": { @@ -3134,13 +3967,13 @@ } } }, - "com.amazonaws.fsx#DescribeStorageVirtualMachinesResponse": { + "com.amazonaws.fsx#DescribeSnapshotsResponse": { "type": "structure", "members": { - "StorageVirtualMachines": { - "target": "com.amazonaws.fsx#StorageVirtualMachines", + "Snapshots": { + "target": "com.amazonaws.fsx#Snapshots", "traits": { - "smithy.api#documentation": "

                                                                      Returned after a successful DescribeStorageVirtualMachines operation, describing each SVM.

                                                                      " + "smithy.api#documentation": "

                                                                      An array of snapshots.

                                                                      " } }, "NextToken": { @@ -3148,13 +3981,13 @@ } } }, - "com.amazonaws.fsx#DescribeVolumes": { + "com.amazonaws.fsx#DescribeStorageVirtualMachines": { "type": "operation", "input": { - "target": "com.amazonaws.fsx#DescribeVolumesRequest" + "target": "com.amazonaws.fsx#DescribeStorageVirtualMachinesRequest" }, "output": { - "target": "com.amazonaws.fsx#DescribeVolumesResponse" + "target": "com.amazonaws.fsx#DescribeStorageVirtualMachinesResponse" }, "errors": [ { @@ -3164,11 +3997,11 @@ "target": "com.amazonaws.fsx#InternalServerError" }, { - "target": "com.amazonaws.fsx#VolumeNotFound" + "target": "com.amazonaws.fsx#StorageVirtualMachineNotFound" } ], "traits": { - "smithy.api#documentation": "

                                                                      Describes one or more Amazon FSx for NetApp ONTAP volumes.

                                                                      ", + "smithy.api#documentation": "

                                                                      Describes one or more Amazon FSx for NetApp ONTAP storage virtual machines (SVMs).

                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -3176,19 +4009,84 @@ } } }, - "com.amazonaws.fsx#DescribeVolumesRequest": { + "com.amazonaws.fsx#DescribeStorageVirtualMachinesRequest": { + "type": "structure", + "members": { + "StorageVirtualMachineIds": { + "target": "com.amazonaws.fsx#StorageVirtualMachineIds", + "traits": { + "smithy.api#documentation": "

                                                                      Enter the ID of one or more SVMs that you want to view.

                                                                      " + } + }, + "Filters": { + "target": "com.amazonaws.fsx#StorageVirtualMachineFilters", + "traits": { + "smithy.api#documentation": "

                                                                      Enter a filter name:value pair to view a select set of SVMs.

                                                                      " + } + }, + "MaxResults": { + "target": "com.amazonaws.fsx#MaxResults" + }, + "NextToken": { + "target": "com.amazonaws.fsx#NextToken" + } + } + }, + "com.amazonaws.fsx#DescribeStorageVirtualMachinesResponse": { + "type": "structure", + "members": { + "StorageVirtualMachines": { + "target": "com.amazonaws.fsx#StorageVirtualMachines", + "traits": { + "smithy.api#documentation": "

                                                                      Returned after a successful DescribeStorageVirtualMachines operation, describing each SVM.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.fsx#NextToken" + } + } + }, + "com.amazonaws.fsx#DescribeVolumes": { + "type": "operation", + "input": { + "target": "com.amazonaws.fsx#DescribeVolumesRequest" + }, + "output": { + "target": "com.amazonaws.fsx#DescribeVolumesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fsx#BadRequest" + }, + { + "target": "com.amazonaws.fsx#InternalServerError" + }, + { + "target": "com.amazonaws.fsx#VolumeNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Describes one or more Amazon FSx for NetApp ONTAP or Amazon FSx for\n OpenZFS volumes.

                                                                      ", + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.fsx#DescribeVolumesRequest": { "type": "structure", "members": { "VolumeIds": { "target": "com.amazonaws.fsx#VolumeIds", "traits": { - "smithy.api#documentation": "

                                                                      IDs of the volumes whose descriptions you want to retrieve.

                                                                      " + "smithy.api#documentation": "

                                                                      The IDs of the volumes whose descriptions you want to retrieve.

                                                                      " } }, "Filters": { "target": "com.amazonaws.fsx#VolumeFilters", "traits": { - "smithy.api#documentation": "

                                                                      Enter a filter name:value pair to view a select set of volumes.

                                                                      " + "smithy.api#documentation": "

                                                                      Enter a filter Name and Values pair to view a select set of\n volumes.

                                                                      " } }, "MaxResults": { @@ -3326,7 +4224,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The SSD IOPS (input/output operations per second) configuration\n for an Amazon FSx for NetApp ONTAP file system. The default is 3 IOPS\n per GB of storage capacity, but you can provision additional IOPS\n per GB of storage. The configuration consists of the total number\n of provisioned SSD IOPS and how the amount was provisioned\n (by the customer or by the system).

                                                                      " + "smithy.api#documentation": "

                                                                      The SSD IOPS (input/output operations per second) configuration for an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS file system. The\n default is 3 IOPS per GB of storage capacity, but you can provision additional IOPS per\n GB of storage. The configuration consists of the total number of provisioned SSD IOPS\n and how the amount was provisioned (by the customer or by the system).

                                                                      " } }, "com.amazonaws.fsx#DiskIopsConfigurationMode": { @@ -3384,6 +4282,37 @@ } } }, + "com.amazonaws.fsx#EventType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NEW", + "name": "NEW" + }, + { + "value": "CHANGED", + "name": "CHANGED" + }, + { + "value": "DELETED", + "name": "DELETED" + } + ] + } + }, + "com.amazonaws.fsx#EventTypes": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#EventType" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 3 + } + } + }, "com.amazonaws.fsx#FailedCount": { "type": "long", "traits": { @@ -3414,13 +4343,13 @@ "FileSystemType": { "target": "com.amazonaws.fsx#FileSystemType", "traits": { - "smithy.api#documentation": "

                                                                      The type of Amazon FSx file system, which can be LUSTRE, WINDOWS,\n or ONTAP.

                                                                      " + "smithy.api#documentation": "

                                                                      The type of Amazon FSx file system, which can be LUSTRE,\n WINDOWS, ONTAP, or OPENZFS.

                                                                      " } }, "Lifecycle": { "target": "com.amazonaws.fsx#FileSystemLifecycle", "traits": { - "smithy.api#documentation": "

                                                                      The lifecycle status of the file system, following are the possible values and what they mean:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n AVAILABLE - The file system is in a healthy state, and is reachable and available for use.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n CREATING - Amazon FSx is creating the new file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n DELETING - Amazon FSx is deleting an existing file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n FAILED - An existing file system has experienced an unrecoverable failure. \n When creating a new file system, Amazon FSx was unable to create the file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n MISCONFIGURED indicates that the file system is in a failed but recoverable state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n UPDATING indicates that the file system is undergoing a customer initiated update.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The lifecycle status of the file system. The following are the possible values and\n what they mean:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n AVAILABLE - The file system is in a healthy state, and is reachable and available for use.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n CREATING - Amazon FSx is creating the new file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n DELETING - Amazon FSx is deleting an existing file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n FAILED - An existing file system has experienced an unrecoverable failure. \n When creating a new file system, Amazon FSx was unable to create the file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n MISCONFIGURED - The file system is in a failed but recoverable state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n UPDATING - The file system is undergoing a customer-initiated update.

                                                                        \n
                                                                      • \n
                                                                      " } }, "FailureDetails": { @@ -3435,37 +4364,37 @@ "StorageType": { "target": "com.amazonaws.fsx#StorageType", "traits": { - "smithy.api#documentation": "

                                                                      The storage type of the file system. \n Valid values are SSD and HDD.\n If set to SSD, the file system uses solid state drive storage. \n If set to HDD, the file system uses hard disk drive storage.\n

                                                                      " + "smithy.api#documentation": "

                                                                      The type of storage the file system is using. \n If set to SSD, the file system uses solid state drive storage. \n If set to HDD, the file system uses hard disk drive storage.\n

                                                                      " } }, "VpcId": { "target": "com.amazonaws.fsx#VpcId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the primary VPC for the file system.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the primary virtual private cloud (VPC) for the file system.

                                                                      " } }, "SubnetIds": { "target": "com.amazonaws.fsx#SubnetIds", "traits": { - "smithy.api#documentation": "

                                                                      Specifies the IDs of the subnets that the file system is accessible from. For Windows and\n ONTAP MULTI_AZ_1 file system deployment type, there are two subnet IDs, one for\n the preferred file server and one for the standby file server. The preferred file server subnet\n identified in the PreferredSubnetID property. All other file systems have only one subnet ID.

                                                                      \n

                                                                      For Lustre file systems, and Single-AZ Windows file systems, this is the ID of \n the subnet that contains the endpoint for the file system. For MULTI_AZ_1 Windows and\n ONTAP file systems, the endpoint for the file system is available in the PreferredSubnetID.

                                                                      " + "smithy.api#documentation": "

                                                                      Specifies the IDs of the subnets that the file system is accessible from. For the Amazon FSx Windows and\n ONTAP MULTI_AZ_1 file system deployment type, there are two subnet IDs, one for\n the preferred file server and one for the standby file server. The preferred file server subnet\n is identified in the PreferredSubnetID property. All other file systems have only one subnet ID.

                                                                      \n

                                                                      For FSx for Lustre file systems, and Single-AZ Windows file systems, this is the ID of \n the subnet that contains the file system's endpoint. For MULTI_AZ_1 Windows and\n ONTAP file systems, the file system endpoint is available in the PreferredSubnetID.

                                                                      " } }, "NetworkInterfaceIds": { "target": "com.amazonaws.fsx#NetworkInterfaceIds", "traits": { - "smithy.api#documentation": "

                                                                      The IDs of the elastic network interface from which a specific file system is\n accessible. The elastic network interface is automatically created in the same VPC that\n the Amazon FSx file system was created in. For more information, see Elastic Network\n Interfaces in the Amazon EC2 User Guide.\n

                                                                      \n\n

                                                                      For an Amazon FSx for Windows File Server file system, you can have one network\n interface ID. For an Amazon FSx for Lustre file system, you can have more than\n one.

                                                                      " + "smithy.api#documentation": "

                                                                      The IDs of the elastic network interfaces from which a specific file system is\n accessible. The elastic network interface is automatically created in the same virtual\n private cloud (VPC) that the Amazon FSx file system was created in. For more\n information, see Elastic Network Interfaces in\n the Amazon EC2 User Guide.\n

                                                                      \n\n

                                                                      For an Amazon FSx for Windows File Server file system, you can have one\n network interface ID. For an Amazon FSx for Lustre file system, you can have\n more than one.

                                                                      " } }, "DNSName": { "target": "com.amazonaws.fsx#DNSName", "traits": { - "smithy.api#documentation": "

                                                                      The DNS name for the file system.

                                                                      " + "smithy.api#documentation": "

                                                                      The Domain Name System (DNS) name for the file system.

                                                                      " } }, "KmsKeyId": { "target": "com.amazonaws.fsx#KmsKeyId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the Key Management Service (KMS) key used to encrypt the file system's data\n for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and\n persistent Amazon FSx for Lustre file systems at rest. If not specified, the Amazon FSx\n managed key is used. The scratch Amazon FSx for Lustre file systems are always encrypted at rest using\n Amazon FSx managed keys. For more information, see Encrypt\n in the Key Management Service API Reference.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the Key Management Service (KMS) key used to encrypt the file\n system's data for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and PERSISTENT\n Amazon FSx for Lustre file systems at rest. If this ID isn't specified, the\n Amazon FSx-managed key for your account is used. The scratch Amazon FSx for Lustre file systems are always encrypted at rest using the Amazon\n FSx-managed key for your account. For more information, see Encrypt in the\n Key Management Service API Reference.

                                                                      " } }, "ResourceARN": { @@ -3477,13 +4406,13 @@ "Tags": { "target": "com.amazonaws.fsx#Tags", "traits": { - "smithy.api#documentation": "

                                                                      The tags to associate with the file system. For more information, see Tagging Your\n Amazon EC2 Resources in the Amazon EC2 User\n Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      The tags to associate with the file system. For more information, see Tagging your\n Amazon EC2 resources in the Amazon EC2 User\n Guide.

                                                                      " } }, "WindowsConfiguration": { "target": "com.amazonaws.fsx#WindowsFileSystemConfiguration", "traits": { - "smithy.api#documentation": "

                                                                      The configuration for this Microsoft Windows file system.

                                                                      " + "smithy.api#documentation": "

                                                                      The configuration for this FSx for Windows File Server file system.

                                                                      " } }, "LustreConfiguration": { @@ -3492,19 +4421,25 @@ "AdministrativeActions": { "target": "com.amazonaws.fsx#AdministrativeActions", "traits": { - "smithy.api#documentation": "

                                                                      A list of administrative actions for the file system that are in process or waiting to be processed. \n Administrative actions describe changes to the Amazon FSx file system that you have initiated using\n the UpdateFileSystem action.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of administrative actions for the file system that are in process or waiting to\n be processed. Administrative actions describe changes to the Amazon FSx file system\n that you have initiated using the UpdateFileSystem operation.

                                                                      " } }, "OntapConfiguration": { "target": "com.amazonaws.fsx#OntapFileSystemConfiguration", "traits": { - "smithy.api#documentation": "

                                                                      The configuration for this FSx for NetApp ONTAP file system.

                                                                      " + "smithy.api#documentation": "

                                                                      The configuration for this FSx for ONTAP file system.

                                                                      " } }, "FileSystemTypeVersion": { "target": "com.amazonaws.fsx#FileSystemTypeVersion", "traits": { - "smithy.api#documentation": "

                                                                      The version of your Amazon FSx for Lustre file system, either\n 2.10 or 2.12.

                                                                      " + "smithy.api#documentation": "

                                                                      The Lustre version of the Amazon FSx for Lustre file system, either\n 2.10 or 2.12.

                                                                      " + } + }, + "OpenZFSConfiguration": { + "target": "com.amazonaws.fsx#OpenZFSFileSystemConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      The configuration for this Amazon FSx for OpenZFS file system.

                                                                      " } } }, @@ -3570,7 +4505,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure providing details of any failures that occur when creating the file system\n has failed.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure providing details of any failures that occurred when creating a file\n system.

                                                                      " } }, "com.amazonaws.fsx#FileSystemId": { @@ -3590,7 +4525,7 @@ "target": "com.amazonaws.fsx#FileSystemId" }, "traits": { - "smithy.api#documentation": "

                                                                      A list of FileSystemIds.

                                                                      ", + "smithy.api#documentation": "

                                                                      A list of file system IDs.

                                                                      ", "smithy.api#length": { "min": 0, "max": 50 @@ -3686,6 +4621,10 @@ { "value": "ONTAP", "name": "ONTAP" + }, + { + "value": "OPENZFS", + "name": "OPENZFS" } ] } @@ -3697,7 +4636,7 @@ "min": 1, "max": 20 }, - "smithy.api#pattern": "^[0-9](\\.[0-9]*)*$" + "smithy.api#pattern": "^[0-9](.[0-9]*)*$" } }, "com.amazonaws.fsx#FileSystems": { @@ -3753,6 +4692,10 @@ { "value": "volume-id", "name": "VOLUME_ID" + }, + { + "value": "data-repository-type", + "name": "DATA_REPOSITORY_TYPE" } ] } @@ -3856,10 +4799,20 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      Amazon FSx doesn't support Multi-AZ Windows File Server\n copy backup in the destination Region, so the copied backup\n can't be restored.

                                                                      ", + "smithy.api#documentation": "

                                                                      Amazon FSx doesn't support Multi-AZ Windows File Server copy backup in the\n destination Region, so the copied backup can't be restored.

                                                                      ", "smithy.api#error": "client" } }, + "com.amazonaws.fsx#IntegerNoMax": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 2147483647 + } + } + }, "com.amazonaws.fsx#InternalServerError": { "type": "structure", "members": { @@ -3872,6 +4825,18 @@ "smithy.api#error": "server" } }, + "com.amazonaws.fsx#InvalidDataRepositoryType": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.fsx#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      You have filtered the response to a data repository type that is not supported.

                                                                      ", + "smithy.api#error": "client" + } + }, "com.amazonaws.fsx#InvalidDestinationKmsKey": { "type": "structure", "members": { @@ -3880,7 +4845,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The Key Management Service (KMS) key of the destination\n backup is invalid.

                                                                      ", + "smithy.api#documentation": "

                                                                      The Key Management Service (KMS) key of the destination backup is not\n valid.

                                                                      ", "smithy.api#error": "client" } }, @@ -3961,7 +4926,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The Region provided for Source Region is invalid or\n is in a different Amazon Web Services partition.

                                                                      ", + "smithy.api#documentation": "

                                                                      The Region provided for SourceRegion is not valid or is in a different\n Amazon Web Services partition.

                                                                      ", "smithy.api#error": "client" } }, @@ -3973,7 +4938,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The Key Management Service (KMS) key of the source backup\n is invalid.

                                                                      ", + "smithy.api#documentation": "

                                                                      The Key Management Service (KMS) key of the source backup is not\n valid.

                                                                      ", "smithy.api#error": "client" } }, @@ -3983,7 +4948,7 @@ "smithy.api#box": {}, "smithy.api#range": { "min": 0, - "max": 80000 + "max": 160000 } } }, @@ -4020,7 +4985,7 @@ "com.amazonaws.fsx#KmsKeyId": { "type": "string", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the Key Management Service (KMS) key used to encrypt the file system's data\n for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and\n Amazon FSx for Lustre PERSISTENT_1 file systems at rest. If not specified, the Amazon FSx\n managed key is used. The Amazon FSx for Lustre SCRATCH_1 and SCRATCH_2 file systems\n are always encrypted at rest using Amazon FSx managed keys. For more information, see Encrypt\n in the Key Management Service API Reference.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID of the Key Management Service (KMS) key used to encrypt the file\n system's data for Amazon FSx for Windows File Server file systems, Amazon FSx for NetApp ONTAP file systems, and Amazon FSx for Lustre\n PERSISTENT_1 and PERSISTENT_2 file systems at rest. If this ID\n isn't specified, the key managed by Amazon FSx is used. The Amazon FSx for Lustre\n SCRATCH_1 and SCRATCH_2 file systems are always encrypted at\n rest using Amazon FSx-managed keys. For more information, see Encrypt\n in the Key Management Service API Reference.

                                                                      ", "smithy.api#length": { "min": 1, "max": 2048 @@ -4042,6 +5007,16 @@ "smithy.api#documentation": "

                                                                      Describes why a resource lifecycle state changed.

                                                                      " } }, + "com.amazonaws.fsx#LimitedMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 25 + } + } + }, "com.amazonaws.fsx#ListTagsForResource": { "type": "operation", "input": { @@ -4123,6 +5098,29 @@ "smithy.api#documentation": "

                                                                      The response object for ListTagsForResource operation.

                                                                      " } }, + "com.amazonaws.fsx#LustreAccessAuditLogLevel": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DISABLED", + "name": "DISABLED" + }, + { + "value": "WARN_ONLY", + "name": "WARN_ONLY" + }, + { + "value": "ERROR_ONLY", + "name": "ERROR_ONLY" + }, + { + "value": "WARN_ERROR", + "name": "WARN_ERROR" + } + ] + } + }, "com.amazonaws.fsx#LustreDeploymentType": { "type": "string", "traits": { @@ -4138,6 +5136,10 @@ { "value": "PERSISTENT_1", "name": "PERSISTENT_1" + }, + { + "value": "PERSISTENT_2", + "name": "PERSISTENT_2" } ] } @@ -4148,7 +5150,7 @@ "WeeklyMaintenanceStartTime": { "target": "com.amazonaws.fsx#WeeklyTime", "traits": { - "smithy.api#documentation": "

                                                                      The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC\n time zone. d is the weekday number, from 1 through 7, beginning with Monday and ending with Sunday.

                                                                      " + "smithy.api#documentation": "

                                                                      The preferred start time to perform weekly maintenance, formatted d:HH:MM in the UTC\n time zone. Here, d is the weekday number, from 1 through 7, beginning with Monday and\n ending with Sunday.

                                                                      " } }, "DataRepositoryConfiguration": { @@ -4157,19 +5159,19 @@ "DeploymentType": { "target": "com.amazonaws.fsx#LustreDeploymentType", "traits": { - "smithy.api#documentation": "

                                                                      The deployment type of the FSX for Lustre file system. Scratch deployment type is designed for temporary storage\n and shorter-term processing of data.

                                                                      \n

                                                                      \n SCRATCH_1 and SCRATCH_2 deployment \n types are best suited for when you need temporary storage and shorter-term processing of data. \n The SCRATCH_2 deployment type provides in-transit encryption of data and higher burst \n throughput capacity than SCRATCH_1.

                                                                      \n

                                                                      The PERSISTENT_1 deployment type is used for longer-term storage \n and workloads and encryption of data in transit. To learn more about deployment types, see \n \n FSx for Lustre Deployment Options. (Default = SCRATCH_1)

                                                                      " + "smithy.api#documentation": "

                                                                      The deployment type of the FSx for Lustre file system. \n The scratch deployment types are designed for temporary storage\n and shorter-term processing of data.

                                                                      \n

                                                                      \n SCRATCH_1 and SCRATCH_2 deployment types are best suited \n for when you need temporary storage and shorter-term processing of data. The \n SCRATCH_2 deployment type provides in-transit encryption of data and higher burst \n throughput capacity than SCRATCH_1.

                                                                      \n

                                                                      The PERSISTENT_1 and PERSISTENT_2 deployment types are used\n for longer-term storage and workloads and encryption of data in transit.\n PERSISTENT_2 is built on Lustre v2.12 and offers higher\n PerUnitStorageThroughput (up to 1000 MB/s/TiB) along with a lower\n minimum storage capacity requirement (600 GiB). To learn more about FSx for Lustre deployment types, see \n FSx for Lustre deployment options.

                                                                      \n

                                                                      The default is SCRATCH_1.

                                                                      " } }, "PerUnitStorageThroughput": { "target": "com.amazonaws.fsx#PerUnitStorageThroughput", "traits": { - "smithy.api#documentation": "

                                                                      Per unit storage throughput represents the megabytes per second of read or write\n throughput per 1 tebibyte of storage provisioned. File system throughput capacity is\n equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). This option is\n only valid for PERSISTENT_1 deployment types.

                                                                      \n

                                                                      Valid values for SSD storage: 50, 100, 200. Valid values for HDD storage: 12, 40.

                                                                      " + "smithy.api#documentation": "

                                                                      Per unit storage throughput represents the megabytes per second of read or write\n throughput per 1 tebibyte of storage provisioned. File system throughput capacity is\n equal to Storage capacity (TiB) * PerUnitStorageThroughput (MB/s/TiB). This option is\n only valid for PERSISTENT_1 and PERSISTENT_2 deployment types.

                                                                      \n

                                                                      Valid values:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        For PERSISTENT_1 SSD storage: 50, 100, 200.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        For PERSISTENT_1 HDD storage: 12, 40.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        For PERSISTENT_2 SSD storage: 125, 250, 500, 1000.

                                                                        \n
                                                                      • \n
                                                                      " } }, "MountName": { "target": "com.amazonaws.fsx#LustreFileSystemMountName", "traits": { - "smithy.api#documentation": "

                                                                      You use the MountName value when mounting the file system.

                                                                      \n

                                                                      For the SCRATCH_1 deployment type, this value is always \"fsx\". \n For SCRATCH_2 and PERSISTENT_1 deployment types, this \n value is a string that is unique within an Amazon Web Services Region. \n \n

                                                                      " + "smithy.api#documentation": "

                                                                      You use the MountName value when mounting the file system.

                                                                      \n

                                                                      For the SCRATCH_1 deployment type, this value is always \"fsx\". \n For SCRATCH_2, PERSISTENT_1, and PERSISTENT_2 deployment\n types, this value is a string that is unique within an Amazon Web Services Region. \n \n

                                                                      " } }, "DailyAutomaticBackupStartTime": { @@ -4181,13 +5183,13 @@ "CopyTagsToBackups": { "target": "com.amazonaws.fsx#Flag", "traits": { - "smithy.api#documentation": "

                                                                      A boolean flag indicating whether tags on the file system should be copied to backups.\n If it's set to true, all tags on the file system are\n copied to all automatic backups and any user-initiated backups where the user\n doesn't specify any tags. If this value is true, and you specify one or more tags,\n only the specified tags are copied to backups. If you specify one or more tags when \n creating a user-initiated backup, no tags are copied from the file system, \n regardless of this value. (Default = false)

                                                                      " + "smithy.api#documentation": "

                                                                      A boolean flag indicating whether tags on the file system are copied to backups.\n If it's set to true, all tags on the file system are\n copied to all automatic backups and any user-initiated backups where the user\n doesn't specify any tags. If this value is true, and you specify one or more tags,\n only the specified tags are copied to backups. If you specify one or more tags when \n creating a user-initiated backup, no tags are copied from the file system, \n regardless of this value. (Default = false)

                                                                      " } }, "DriveCacheType": { "target": "com.amazonaws.fsx#DriveCacheType", "traits": { - "smithy.api#documentation": "

                                                                      The type of drive cache used by PERSISTENT_1 file systems that are provisioned with\n HDD storage devices. This parameter is required when storage type is HDD. Set to\n READ, improve the performance for frequently accessed files and allows 20%\n of the total storage capacity of the file system to be cached.

                                                                      \n

                                                                      This parameter is required when StorageType is set to HDD.

                                                                      " + "smithy.api#documentation": "

                                                                      The type of drive cache used by PERSISTENT_1 file systems that are provisioned with\n HDD storage devices. This parameter is required when StorageType is HDD. When set to\n READ, the file system has an SSD storage cache that is sized to 20% of the file system's\n storage capacity. This improves the performance for frequently accessed files by caching up to 20%\n of the total storage capacity.

                                                                      \n

                                                                      This parameter is required when StorageType is set to HDD.

                                                                      " } }, "DataCompressionType": { @@ -4195,6 +5197,12 @@ "traits": { "smithy.api#documentation": "

                                                                      The data compression configuration for the file system. DataCompressionType\n can have the following values:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n NONE - Data compression is turned off for\n the file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n LZ4 - Data compression is turned on with the LZ4\n algorithm.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      For more information, see Lustre data compression.

                                                                      " } + }, + "LogConfiguration": { + "target": "com.amazonaws.fsx#LustreLogConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      The Lustre logging configuration. Lustre logging writes the enabled log\n events for your file system to Amazon CloudWatch Logs.

                                                                      " + } } }, "traits": { @@ -4211,6 +5219,48 @@ "smithy.api#pattern": "^([A-Za-z0-9_-]{1,8})$" } }, + "com.amazonaws.fsx#LustreLogConfiguration": { + "type": "structure", + "members": { + "Level": { + "target": "com.amazonaws.fsx#LustreAccessAuditLogLevel", + "traits": { + "smithy.api#documentation": "

                                                                      The data repository events that are logged by Amazon FSx.

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n WARN_ONLY - only warning events are logged.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n ERROR_ONLY - only error events are logged.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n WARN_ERROR - both warning events and error events\n are logged.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n DISABLED - logging of data repository events\n is turned off.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#required": {} + } + }, + "Destination": { + "target": "com.amazonaws.fsx#GeneralARN", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that specifies the destination of the logs.\n The destination can be any Amazon CloudWatch Logs log group ARN. The destination\n ARN must be in the same Amazon Web Services partition, Amazon Web Services Region,\n and Amazon Web Services account as your Amazon FSx file system.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The configuration for Lustre logging used to write the enabled\n logging events for your file system to Amazon CloudWatch Logs.

                                                                      \n

                                                                      When logging is enabled, Lustre logs error and warning events\n from data repository operations such as automatic export and data repository tasks.\n To learn more about Lustre logging, see \n Logging with Amazon CloudWatch Logs.\n

                                                                      " + } + }, + "com.amazonaws.fsx#LustreLogCreateConfiguration": { + "type": "structure", + "members": { + "Level": { + "target": "com.amazonaws.fsx#LustreAccessAuditLogLevel", + "traits": { + "smithy.api#documentation": "

                                                                      Sets which data repository events are logged by Amazon FSx.

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n WARN_ONLY - only warning events are logged.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n ERROR_ONLY - only error events are logged.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n WARN_ERROR - both warning events and error events\n are logged.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n DISABLED - logging of data repository events\n is turned off.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#required": {} + } + }, + "Destination": { + "target": "com.amazonaws.fsx#GeneralARN", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that specifies the destination of the logs.

                                                                      \n

                                                                      The destination can be any Amazon CloudWatch Logs log group ARN, with the following\n requirements:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        The destination ARN that you provide must be in the same Amazon Web Services partition,\n Amazon Web Services Region, and Amazon Web Services account as your Amazon FSx file system.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        The name of the Amazon CloudWatch Logs log group must begin with\n the /aws/fsx prefix.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        If you do not provide a destination, Amazon FSx will create and use a\n log stream in the CloudWatch Logs /aws/fsx/lustre log group.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        If Destination is provided and the resource does not\n exist, the request will fail with a BadRequest error.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        If Level is set to DISABLED, you cannot specify\n a destination in Destination.

                                                                        \n
                                                                      • \n
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The Lustre logging configuration used when creating or updating an\n Amazon FSx for Lustre file system. Lustre logging writes the enabled\n logging events for your file system to Amazon CloudWatch Logs.

                                                                      \n

                                                                      Error and warning events can be logged from the following data\n repository operations:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Automatic export

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Data repository tasks

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      To learn more about Lustre logging, see \n Logging to Amazon CloudWatch Logs.

                                                                      " + } + }, "com.amazonaws.fsx#MaxResults": { "type": "integer", "traits": { @@ -4236,10 +5286,10 @@ "type": "integer", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      Sustained throughput of an Amazon FSx file system in MBps.

                                                                      ", + "smithy.api#documentation": "

                                                                      The sustained throughput of an Amazon FSx file system in MBps.

                                                                      ", "smithy.api#range": { "min": 8, - "max": 2048 + "max": 4096 } } }, @@ -4267,6 +5317,16 @@ "smithy.api#error": "client" } }, + "com.amazonaws.fsx#Namespace": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 4096 + }, + "smithy.api#pattern": "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{3,4096}$" + } + }, "com.amazonaws.fsx#NetBiosAlias": { "type": "string", "traits": { @@ -4419,13 +5479,13 @@ "JunctionPath": { "target": "com.amazonaws.fsx#JunctionPath", "traits": { - "smithy.api#documentation": "

                                                                      Specifies the directory that NAS clients use to mount the volume, along with the SVM DNS name or IP address.\n You can create a JunctionPath directly below a parent volume junction or on a \n directory within a volume. A JunctionPath for a volume named vol3 might \n be /vol1/vol2/vol3, or /vol1/dir2/vol3, or even /dir1/dir2/vol3..

                                                                      " + "smithy.api#documentation": "

                                                                      Specifies the directory that network-attached storage (NAS) clients use to mount the\n volume, along with the storage virtual machine (SVM) Domain Name System (DNS) name or IP\n address. You can create a JunctionPath directly below a parent volume\n junction or on a directory within a volume. A JunctionPath for a volume\n named vol3 might be /vol1/vol2/vol3, or\n /vol1/dir2/vol3, or even /dir1/dir2/vol3.

                                                                      " } }, "SecurityStyle": { "target": "com.amazonaws.fsx#SecurityStyle", "traits": { - "smithy.api#documentation": "

                                                                      The security style for the volume, which can be UNIX,\n NTFS, or MIXED.

                                                                      " + "smithy.api#documentation": "

                                                                      The security style for the volume, which can be UNIX, NTFS,\n or\n MIXED.

                                                                      " } }, "SizeInMegabytes": { @@ -4449,7 +5509,7 @@ "StorageVirtualMachineRoot": { "target": "com.amazonaws.fsx#Flag", "traits": { - "smithy.api#documentation": "

                                                                      A boolean flag indicating whether this volume is the root volume for\n its storage virtual machine (SVM). Only one volume on an SVM can be the\n root volume. This value defaults to false. If this value is true, then\n this is the SVM root volume.

                                                                      \n

                                                                      This flag is useful when you're deleting an SVM, because you must\n first delete all non-root volumes. This flag, when set to false, helps\n you identify which volumes to delete before you can delete the SVM.

                                                                      " + "smithy.api#documentation": "

                                                                      A Boolean flag indicating whether this volume is the root volume for its storage\n virtual machine (SVM). Only one volume on an SVM can be the root volume. This value\n defaults to false. If this value is true, then this is the SVM\n root volume.

                                                                      \n

                                                                      This flag is useful when you're deleting an SVM, because you must first delete all\n non-root volumes. This flag, when set to false, helps you identify which\n volumes to delete before you can delete the SVM.

                                                                      " } }, "TieringPolicy": { @@ -4461,18 +5521,18 @@ "UUID": { "target": "com.amazonaws.fsx#UUID", "traits": { - "smithy.api#documentation": "

                                                                      The volume's UUID (universally unique identifier).

                                                                      " + "smithy.api#documentation": "

                                                                      The volume's universally unique identifier (UUID).

                                                                      " } }, "OntapVolumeType": { "target": "com.amazonaws.fsx#OntapVolumeType", "traits": { - "smithy.api#documentation": "

                                                                      Specifies the type of volume. Valid values are the following:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n RW specifies a read-write volume.\n RW is the default.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n DP specifies a data protection volume. You can\n protect data by replicating it to data protection mirror copies and use\n data protection mirror copies to recover data when a disaster occurs.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n LS specifies a load-sharing mirror volume.\n A load-sharing mirror reduces the network traffic to a FlexVol volume\n by providing additional read-only access to clients.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      Specifies the type of volume. Valid values are the following:

• RW specifies a read/write volume. RW is the default.
• DP specifies a data-protection volume. You can protect data by replicating it to data-protection mirror copies. If a disaster occurs, you can use these data-protection mirror copies to recover data.
• LS specifies a load-sharing mirror volume. A load-sharing mirror reduces the network traffic to a FlexVol volume by providing additional read-only access to clients.
                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      The configuration of an Amazon FSx for NetApp ONTAP volume

                                                                      " + "smithy.api#documentation": "

                                                                      The configuration of an Amazon FSx for NetApp ONTAP volume.

                                                                      " } }, "com.amazonaws.fsx#OntapVolumeType": { @@ -4494,143 +5554,666 @@ ] } }, - "com.amazonaws.fsx#OrganizationalUnitDistinguishedName": { - "type": "string", - "traits": { - "smithy.api#length": { - "min": 1, - "max": 2000 + "com.amazonaws.fsx#OpenZFSClientConfiguration": { + "type": "structure", + "members": { + "Clients": { + "target": "com.amazonaws.fsx#OpenZFSClients", + "traits": { + "smithy.api#documentation": "

A value that specifies who can mount the file system. You can provide a wildcard character (*), an IP address (0.0.0.0), or a CIDR address (192.0.2.0/24). By default, Amazon FSx uses the wildcard character when specifying the client.

                                                                      ", + "smithy.api#required": {} + } }, - "smithy.api#pattern": "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,2000}$" - } - }, - "com.amazonaws.fsx#Parameter": { - "type": "string", - "traits": { - "smithy.api#documentation": "

                                                                      The name of a parameter for the request. Parameter names are returned in\n PascalCase.

                                                                      ", - "smithy.api#length": { - "min": 1 + "Options": { + "target": "com.amazonaws.fsx#OpenZFSNfsExportOptions", + "traits": { + "smithy.api#documentation": "

                                                                      The options to use when mounting the file system. For a list of options that you can\n use with Network File System (NFS), see the exports(5) - Linux man page. When\n choosing your options, consider the following:

• crossmount is used by default. If you don't specify crossmount when changing the client configuration, you won't be able to see or access snapshots in your file system's snapshot directory.
• sync is used by default. If you instead specify async, the system acknowledges writes before writing to disk. If the system crashes before the writes are finished, you lose the unwritten data.
                                                                      ", + "smithy.api#required": {} + } } - } - }, - "com.amazonaws.fsx#PerUnitStorageThroughput": { - "type": "integer", + }, "traits": { - "smithy.api#box": {}, - "smithy.api#range": { - "min": 12, - "max": 200 - } + "smithy.api#documentation": "

                                                                      Specifies who can mount the file system and the options that can be used while\n mounting the file system.

                                                                      " } }, - "com.amazonaws.fsx#ProgressPercent": { - "type": "integer", + "com.amazonaws.fsx#OpenZFSClientConfigurations": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#OpenZFSClientConfiguration" + }, "traits": { - "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The current percent of progress of an asynchronous task.

                                                                      ", - "smithy.api#range": { + "smithy.api#length": { "min": 0, - "max": 100 + "max": 25 } } }, - "com.amazonaws.fsx#Region": { + "com.amazonaws.fsx#OpenZFSClients": { "type": "string", "traits": { "smithy.api#length": { "min": 1, - "max": 20 + "max": 128 }, - "smithy.api#pattern": "^[a-z0-9-]{1,20}$" + "smithy.api#pattern": "^[ -~]{1,128}$" } }, - "com.amazonaws.fsx#ReportFormat": { + "com.amazonaws.fsx#OpenZFSCopyStrategy": { "type": "string", "traits": { "smithy.api#enum": [ { - "value": "REPORT_CSV_20191124", - "name": "REPORT_CSV_20191124" - } - ] - } - }, - "com.amazonaws.fsx#ReportScope": { - "type": "string", - "traits": { - "smithy.api#enum": [ + "value": "CLONE", + "name": "CLONE" + }, { - "value": "FAILED_FILES_ONLY", - "name": "FAILED_FILES_ONLY" + "value": "FULL_COPY", + "name": "FULL_COPY" } ] } }, - "com.amazonaws.fsx#RequestTime": { - "type": "timestamp" - }, - "com.amazonaws.fsx#ResourceARN": { - "type": "string", - "traits": { - "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services\n resources. We require an ARN when you need to specify a resource unambiguously across\n all of Amazon Web Services. For more information, see Amazon Resource Names (ARNs) in\n the Amazon Web Services General Reference.

                                                                      ", - "smithy.api#length": { - "min": 8, - "max": 512 - }, + "com.amazonaws.fsx#OpenZFSCreateRootVolumeConfiguration": { + "type": "structure", + "members": { + "DataCompressionType": { + "target": "com.amazonaws.fsx#OpenZFSDataCompressionType", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the method used to compress the data on the volume. Unless the compression\n type is specified, volumes inherit the DataCompressionType value of their\n parent volume.

• NONE - Doesn't compress the data on the volume.
• ZSTD - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. This algorithm reduces the amount of space used on your volume and has very little impact on compute resources.
                                                                      " + } + }, + "NfsExports": { + "target": "com.amazonaws.fsx#OpenZFSNfsExports", + "traits": { + "smithy.api#documentation": "

                                                                      The configuration object for mounting a file system.

                                                                      " + } + }, + "UserAndGroupQuotas": { + "target": "com.amazonaws.fsx#OpenZFSUserAndGroupQuotas", + "traits": { + "smithy.api#documentation": "

                                                                      An object specifying how much storage users or groups can use on the volume.

                                                                      " + } + }, + "CopyTagsToSnapshots": { + "target": "com.amazonaws.fsx#Flag", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                                                                      " + } + }, + "ReadOnly": { + "target": "com.amazonaws.fsx#ReadOnly", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value indicating whether the volume is read-only. Setting this value to\n true can be useful after you have completed changes to a volume and no\n longer want changes to occur.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The configuration of an Amazon FSx for OpenZFS root volume.

                                                                      " + } + }, + "com.amazonaws.fsx#OpenZFSDataCompressionType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NONE", + "name": "NONE" + }, + { + "value": "ZSTD", + "name": "ZSTD" + } + ] + } + }, + "com.amazonaws.fsx#OpenZFSDeploymentType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SINGLE_AZ_1", + "name": "SINGLE_AZ_1" + } + ] + } + }, + "com.amazonaws.fsx#OpenZFSFileSystemConfiguration": { + "type": "structure", + "members": { + "AutomaticBackupRetentionDays": { + "target": "com.amazonaws.fsx#AutomaticBackupRetentionDays" + }, + "CopyTagsToBackups": { + "target": "com.amazonaws.fsx#Flag", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value indicating whether tags on the file system should be copied to\n backups.\n If it's set to true, all tags on the file system are copied to all\n automatic backups and any user-initiated backups where the user doesn't specify any\n tags. If this value is true and you specify one or more tags, only the\n specified tags are copied to backups. If you specify one or more tags when creating a\n user-initiated backup, no tags are copied from the file system, regardless of this\n value.

                                                                      " + } + }, + "CopyTagsToVolumes": { + "target": "com.amazonaws.fsx#Flag", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                                                                      " + } + }, + "DailyAutomaticBackupStartTime": { + "target": "com.amazonaws.fsx#DailyTime" + }, + "DeploymentType": { + "target": "com.amazonaws.fsx#OpenZFSDeploymentType", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the file-system deployment type. Amazon FSx for OpenZFS supports\n SINGLE_AZ_1. SINGLE_AZ_1 is a file system configured for a\n single Availability Zone (AZ) of redundancy.

                                                                      " + } + }, + "ThroughputCapacity": { + "target": "com.amazonaws.fsx#MegabytesPerSecond", + "traits": { + "smithy.api#documentation": "

The throughput of an Amazon FSx file system, measured in megabytes per second (MBps). Valid values are powers of 2 from 2^3 (8) through 2^11 (2048).

                                                                      " + } + }, + "WeeklyMaintenanceStartTime": { + "target": "com.amazonaws.fsx#WeeklyTime" + }, + "DiskIopsConfiguration": { + "target": "com.amazonaws.fsx#DiskIopsConfiguration" + }, + "RootVolumeId": { + "target": "com.amazonaws.fsx#VolumeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the root volume of the OpenZFS file system.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The configuration for the Amazon FSx for OpenZFS file system.

                                                                      " + } + }, + "com.amazonaws.fsx#OpenZFSNfsExport": { + "type": "structure", + "members": { + "ClientConfigurations": { + "target": "com.amazonaws.fsx#OpenZFSClientConfigurations", + "traits": { + "smithy.api#documentation": "

                                                                      A list of configuration objects that contain the client and options for mounting the\n OpenZFS file system.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The Network File System (NFS) configurations for mounting an Amazon FSx for OpenZFS file system.

                                                                      " + } + }, + "com.amazonaws.fsx#OpenZFSNfsExportOption": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[ -~]{1,128}$" + } + }, + "com.amazonaws.fsx#OpenZFSNfsExportOptions": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#OpenZFSNfsExportOption" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + } + } + }, + "com.amazonaws.fsx#OpenZFSNfsExports": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#OpenZFSNfsExport" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1 + } + } + }, + "com.amazonaws.fsx#OpenZFSOriginSnapshotConfiguration": { + "type": "structure", + "members": { + "SnapshotARN": { + "target": "com.amazonaws.fsx#ResourceARN" + }, + "CopyStrategy": { + "target": "com.amazonaws.fsx#OpenZFSCopyStrategy", + "traits": { + "smithy.api#documentation": "

                                                                      The strategy used when copying data from the snapshot to the new volume.

• CLONE - The new volume references the data in the origin snapshot. Cloning a snapshot is faster than copying the data from a snapshot to a new volume and doesn't consume disk throughput. However, the origin snapshot can't be deleted if there is a volume using its copied data.
• FULL_COPY - Copies all data from the snapshot to the new volume.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The snapshot configuration to use when creating an OpenZFS volume from a\n snapshot.

                                                                      " + } + }, + "com.amazonaws.fsx#OpenZFSQuotaType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "USER", + "name": "USER" + }, + { + "value": "GROUP", + "name": "GROUP" + } + ] + } + }, + "com.amazonaws.fsx#OpenZFSUserAndGroupQuotas": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#OpenZFSUserOrGroupQuota" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.fsx#OpenZFSUserOrGroupQuota": { + "type": "structure", + "members": { + "Type": { + "target": "com.amazonaws.fsx#OpenZFSQuotaType", + "traits": { + "smithy.api#documentation": "

                                                                      A value that specifies whether the quota applies to a user or group.

                                                                      ", + "smithy.api#required": {} + } + }, + "Id": { + "target": "com.amazonaws.fsx#IntegerNoMax", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the user or group.

                                                                      ", + "smithy.api#required": {} + } + }, + "StorageCapacityQuotaGiB": { + "target": "com.amazonaws.fsx#IntegerNoMax", + "traits": { + "smithy.api#documentation": "

                                                                      The amount of storage that the user or group can use in gibibytes (GiB).

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The configuration for how much storage a user or group can use on the volume.

                                                                      " + } + }, + "com.amazonaws.fsx#OpenZFSVolumeConfiguration": { + "type": "structure", + "members": { + "ParentVolumeId": { + "target": "com.amazonaws.fsx#VolumeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the parent volume.

                                                                      " + } + }, + "VolumePath": { + "target": "com.amazonaws.fsx#VolumePath", + "traits": { + "smithy.api#documentation": "

                                                                      The path to the volume from the root volume. For example,\n fsx/parentVolume/volume1.

                                                                      " + } + }, + "StorageCapacityReservationGiB": { + "target": "com.amazonaws.fsx#IntegerNoMax", + "traits": { + "smithy.api#documentation": "

                                                                      The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't\n reserve more storage than the parent volume has reserved.

                                                                      " + } + }, + "StorageCapacityQuotaGiB": { + "target": "com.amazonaws.fsx#IntegerNoMax", + "traits": { + "smithy.api#documentation": "

The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume.

                                                                      " + } + }, + "DataCompressionType": { + "target": "com.amazonaws.fsx#OpenZFSDataCompressionType", + "traits": { + "smithy.api#documentation": "

                                                                      The method used to compress the data on the volume. Unless a compression type is\n specified, volumes inherit the DataCompressionType value of their parent\n volume.

• NONE - Doesn't compress the data on the volume.
• ZSTD - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. This algorithm reduces the amount of space used on your volume and has very little impact on compute resources.
                                                                      " + } + }, + "CopyTagsToSnapshots": { + "target": "com.amazonaws.fsx#Flag", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value indicating whether tags for the volume should be copied to snapshots.\n This value defaults to false. If it's set to true, all tags\n for the volume are copied to snapshots where the user doesn't specify tags. If this\n value is true and you specify one or more tags, only the specified tags are\n copied to snapshots. If you specify one or more tags when creating the snapshot, no tags\n are copied from the volume, regardless of this value.

                                                                      " + } + }, + "OriginSnapshot": { + "target": "com.amazonaws.fsx#OpenZFSOriginSnapshotConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      The configuration object that specifies the snapshot to use as the origin of the data\n for the volume.

                                                                      " + } + }, + "ReadOnly": { + "target": "com.amazonaws.fsx#ReadOnly", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value indicating whether the volume is read-only.

                                                                      " + } + }, + "NfsExports": { + "target": "com.amazonaws.fsx#OpenZFSNfsExports", + "traits": { + "smithy.api#documentation": "

                                                                      The configuration object for mounting a Network File System (NFS) file\n system.

                                                                      " + } + }, + "UserAndGroupQuotas": { + "target": "com.amazonaws.fsx#OpenZFSUserAndGroupQuotas", + "traits": { + "smithy.api#documentation": "

                                                                      An object specifying how much storage users or groups can use on the volume.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The configuration of an Amazon FSx for OpenZFS volume.

                                                                      " + } + }, + "com.amazonaws.fsx#OrganizationalUnitDistinguishedName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2000 + }, + "smithy.api#pattern": "^[^\\u0000\\u0085\\u2028\\u2029\\r\\n]{1,2000}$" + } + }, + "com.amazonaws.fsx#Parameter": { + "type": "string", + "traits": { + "smithy.api#documentation": "

                                                                      The name of a parameter for the request. Parameter names are returned in\n PascalCase.

                                                                      ", + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.fsx#PerUnitStorageThroughput": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 12, + "max": 1000 + } + } + }, + "com.amazonaws.fsx#ProgressPercent": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#documentation": "

                                                                      The current percent of progress of an asynchronous task.

                                                                      ", + "smithy.api#range": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.fsx#ReadOnly": { + "type": "boolean", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.fsx#Region": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 20 + }, + "smithy.api#pattern": "^[a-z0-9-]{1,20}$" + } + }, + "com.amazonaws.fsx#ReleaseFileSystemNfsV3Locks": { + "type": "operation", + "input": { + "target": "com.amazonaws.fsx#ReleaseFileSystemNfsV3LocksRequest" + }, + "output": { + "target": "com.amazonaws.fsx#ReleaseFileSystemNfsV3LocksResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fsx#BadRequest" + }, + { + "target": "com.amazonaws.fsx#FileSystemNotFound" + }, + { + "target": "com.amazonaws.fsx#IncompatibleParameterError" + }, + { + "target": "com.amazonaws.fsx#InternalServerError" + }, + { + "target": "com.amazonaws.fsx#ServiceLimitExceeded" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Releases the file system lock from an Amazon FSx for OpenZFS file\n system.

                                                                      ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.fsx#ReleaseFileSystemNfsV3LocksRequest": { + "type": "structure", + "members": { + "FileSystemId": { + "target": "com.amazonaws.fsx#FileSystemId", + "traits": { + "smithy.api#required": {} + } + }, + "ClientRequestToken": { + "target": "com.amazonaws.fsx#ClientRequestToken", + "traits": { + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.fsx#ReleaseFileSystemNfsV3LocksResponse": { + "type": "structure", + "members": { + "FileSystem": { + "target": "com.amazonaws.fsx#FileSystem" + } + } + }, + "com.amazonaws.fsx#ReportFormat": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "REPORT_CSV_20191124", + "name": "REPORT_CSV_20191124" + } + ] + } + }, + "com.amazonaws.fsx#ReportScope": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FAILED_FILES_ONLY", + "name": "FAILED_FILES_ONLY" + } + ] + } + }, + "com.amazonaws.fsx#RequestTime": { + "type": "timestamp" + }, + "com.amazonaws.fsx#ResourceARN": { + "type": "string", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) for a given resource. ARNs uniquely identify Amazon Web Services\n resources. We require an ARN when you need to specify a resource unambiguously across\n all of Amazon Web Services. For more information, see Amazon Resource Names (ARNs) in\n the Amazon Web Services General Reference.

                                                                      ", + "smithy.api#length": { + "min": 8, + "max": 512 + }, "smithy.api#pattern": "^arn:(?=[^:]+:fsx:[^:]+:\\d{12}:)((|(?=[a-z0-9-.]{1,63})(?!\\d{1,3}(\\.\\d{1,3}){3})(?![^:]*-{2})(?![^:]*-\\.)(?![^:]*\\.-)[a-z0-9].*(?The Amazon Resource Name (ARN) of the resource that doesn't support\n tagging.

                                                                      ", + "smithy.api#required": {} + } + }, + "Message": { + "target": "com.amazonaws.fsx#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The resource specified does not support tagging.

                                                                      ", + "smithy.api#error": "client" + } + }, + "com.amazonaws.fsx#ResourceNotFound": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.fsx#ResourceARN", + "traits": { + "smithy.api#documentation": "

                                                                      The resource ARN of the resource that can't be found.

                                                                      ", + "smithy.api#required": {} + } + }, + "Message": { + "target": "com.amazonaws.fsx#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The resource specified by the Amazon Resource Name (ARN) can't be found.

                                                                      ", + "smithy.api#error": "client" + } + }, + "com.amazonaws.fsx#ResourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FILE_SYSTEM", + "name": "FILE_SYSTEM" + }, + { + "value": "VOLUME", + "name": "VOLUME" + } + ] + } + }, + "com.amazonaws.fsx#RestoreOpenZFSVolumeOption": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DELETE_INTERMEDIATE_SNAPSHOTS", + "name": "DELETE_INTERMEDIATE_SNAPSHOTS" + }, + { + "value": "DELETE_CLONED_VOLUMES", + "name": "DELETE_CLONED_VOLUMES" + } + ] + } + }, + "com.amazonaws.fsx#RestoreOpenZFSVolumeOptions": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#RestoreOpenZFSVolumeOption" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2 + } + } + }, + "com.amazonaws.fsx#RestoreVolumeFromSnapshot": { + "type": "operation", + "input": { + "target": "com.amazonaws.fsx#RestoreVolumeFromSnapshotRequest" + }, + "output": { + "target": "com.amazonaws.fsx#RestoreVolumeFromSnapshotResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fsx#BadRequest" + }, + { + "target": "com.amazonaws.fsx#InternalServerError" + }, + { + "target": "com.amazonaws.fsx#VolumeNotFound" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns an Amazon FSx for OpenZFS volume to the state saved by the specified\n snapshot.

                                                                      ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.fsx#RestoreVolumeFromSnapshotRequest": { "type": "structure", "members": { - "ResourceARN": { - "target": "com.amazonaws.fsx#ResourceARN", + "ClientRequestToken": { + "target": "com.amazonaws.fsx#ClientRequestToken", "traits": { - "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource that doesn't support\n tagging.

                                                                      ", + "smithy.api#idempotencyToken": {} + } + }, + "VolumeId": { + "target": "com.amazonaws.fsx#VolumeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the volume that you are restoring.

                                                                      ", "smithy.api#required": {} } }, - "Message": { - "target": "com.amazonaws.fsx#ErrorMessage" + "SnapshotId": { + "target": "com.amazonaws.fsx#SnapshotId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the source snapshot. Specifies the snapshot that you are restoring\n from.

                                                                      ", + "smithy.api#required": {} + } + }, + "Options": { + "target": "com.amazonaws.fsx#RestoreOpenZFSVolumeOptions", + "traits": { + "smithy.api#documentation": "

                                                                      The settings used when restoring the specified volume from snapshot.

• DELETE_INTERMEDIATE_SNAPSHOTS - Deletes snapshots between the current state and the specified snapshot. If there are intermediate snapshots and this option isn't used, RestoreVolumeFromSnapshot fails.
• DELETE_CLONED_VOLUMES - Deletes any volumes cloned from this volume. If there are any cloned volumes and this option isn't used, RestoreVolumeFromSnapshot fails.
                                                                      " + } } - }, - "traits": { - "smithy.api#documentation": "

                                                                      The resource specified does not support tagging.

                                                                      ", - "smithy.api#error": "client" } }, - "com.amazonaws.fsx#ResourceNotFound": { + "com.amazonaws.fsx#RestoreVolumeFromSnapshotResponse": { "type": "structure", "members": { - "ResourceARN": { - "target": "com.amazonaws.fsx#ResourceARN", + "VolumeId": { + "target": "com.amazonaws.fsx#VolumeId", "traits": { - "smithy.api#documentation": "

                                                                      The resource ARN of the resource that can't be found.

                                                                      ", - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      The ID of the volume that you restored.

                                                                      " } }, - "Message": { - "target": "com.amazonaws.fsx#ErrorMessage" - } - }, - "traits": { - "smithy.api#documentation": "

                                                                      The resource specified by the Amazon Resource Name (ARN) can't be found.

                                                                      ", - "smithy.api#error": "client" - } - }, - "com.amazonaws.fsx#ResourceType": { - "type": "string", - "traits": { - "smithy.api#enum": [ - { - "value": "FILE_SYSTEM", - "name": "FILE_SYSTEM" - }, - { - "value": "VOLUME", - "name": "VOLUME" + "Lifecycle": { + "target": "com.amazonaws.fsx#VolumeLifecycle", + "traits": { + "smithy.api#documentation": "

                                                                      The lifecycle state of the volume being restored.

                                                                      " } - ] + } } }, "com.amazonaws.fsx#RouteTableId": { @@ -4655,10 +6238,30 @@ } } }, + "com.amazonaws.fsx#S3DataRepositoryConfiguration": { + "type": "structure", + "members": { + "AutoImportPolicy": { + "target": "com.amazonaws.fsx#AutoImportPolicy", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the type of updated objects (new, changed, deleted)\n that will be automatically imported from the linked S3 bucket\n to your file system.

                                                                      " + } + }, + "AutoExportPolicy": { + "target": "com.amazonaws.fsx#AutoExportPolicy", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the type of updated objects (new, changed, deleted)\n that will be automatically exported from your file system to\n the linked S3 bucket.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association. The configuration consists of an AutoImportPolicy that defines which file events on the data repository are automatically imported to the file system and an AutoExportPolicy that defines which file events on the file system are automatically exported to the data repository. File events occur when files or directories are added, changed, or deleted on the file system or the data repository.

                                                                      " + } + }, "com.amazonaws.fsx#SecurityGroupId": { "type": "string", "traits": { - "smithy.api#documentation": "

                                                                      The ID of your Amazon EC2 security group. This ID is used to control network access\n to the endpoint that Amazon FSx creates on your behalf in each subnet. For more\n information, see Amazon EC2 Security\n Groups for Linux Instances in the Amazon EC2 User\n Guide.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID of your Amazon EC2 security group. This ID is used to control network access\n to the endpoint that Amazon FSx creates on your behalf in each subnet. For more\n information, see Amazon EC2 Security\n groups for Linux instances in the Amazon EC2 User\n Guide.

                                                                      ", "smithy.api#length": { "min": 11, "max": 20 @@ -4810,69 +6413,264 @@ "smithy.api#documentation": "

                                                                      The configuration that Amazon FSx uses to join the Windows File Server instance to a\n self-managed Microsoft Active Directory (AD) directory.

                                                                      " } }, - "com.amazonaws.fsx#ServiceLimit": { + "com.amazonaws.fsx#ServiceLimit": { + "type": "string", + "traits": { + "smithy.api#documentation": "

                                                                      The types of limits on your service utilization. Limits include file system count,\n total throughput capacity, total storage, and total user-initiated backups. These limits\n apply for a specific account in a specific Amazon Web Services Region. You can increase some of them by\n contacting Amazon Web Services Support.

                                                                      ", + "smithy.api#enum": [ + { + "value": "FILE_SYSTEM_COUNT", + "name": "FILE_SYSTEM_COUNT" + }, + { + "value": "TOTAL_THROUGHPUT_CAPACITY", + "name": "TOTAL_THROUGHPUT_CAPACITY" + }, + { + "value": "TOTAL_STORAGE", + "name": "TOTAL_STORAGE" + }, + { + "value": "TOTAL_USER_INITIATED_BACKUPS", + "name": "TOTAL_USER_INITIATED_BACKUPS" + }, + { + "value": "TOTAL_USER_TAGS", + "name": "TOTAL_USER_TAGS" + }, + { + "value": "TOTAL_IN_PROGRESS_COPY_BACKUPS", + "name": "TOTAL_IN_PROGRESS_COPY_BACKUPS" + }, + { + "value": "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM", + "name": "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM" + }, + { + "value": "VOLUMES_PER_FILE_SYSTEM", + "name": "VOLUMES_PER_FILE_SYSTEM" + }, + { + "value": "TOTAL_SSD_IOPS", + "name": "TOTAL_SSD_IOPS" + } + ] + } + }, + "com.amazonaws.fsx#ServiceLimitExceeded": { + "type": "structure", + "members": { + "Limit": { + "target": "com.amazonaws.fsx#ServiceLimit", + "traits": { + "smithy.api#documentation": "

                                                                      Enumeration of the service limit that was exceeded.

                                                                      ", + "smithy.api#required": {} + } + }, + "Message": { + "target": "com.amazonaws.fsx#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An error indicating that a particular service limit was exceeded. You can increase\n some service limits by contacting Amazon Web Services Support.

                                                                      ", + "smithy.api#error": "client" + } + }, + "com.amazonaws.fsx#Snapshot": { + "type": "structure", + "members": { + "ResourceARN": { + "target": "com.amazonaws.fsx#ResourceARN" + }, + "SnapshotId": { + "target": "com.amazonaws.fsx#SnapshotId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the snapshot.

                                                                      " + } + }, + "Name": { + "target": "com.amazonaws.fsx#SnapshotName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the snapshot.

                                                                      " + } + }, + "VolumeId": { + "target": "com.amazonaws.fsx#VolumeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the volume that the snapshot is of.

                                                                      " + } + }, + "CreationTime": { + "target": "com.amazonaws.fsx#CreationTime" + }, + "Lifecycle": { + "target": "com.amazonaws.fsx#SnapshotLifecycle", + "traits": { + "smithy.api#documentation": "

                                                                      The lifecycle status of the snapshot.

• PENDING - Amazon FSx hasn't started creating the snapshot.
• CREATING - Amazon FSx is creating the snapshot.
• DELETING - Amazon FSx is deleting the snapshot.
• AVAILABLE - The snapshot is fully available.
                                                                      " + } + }, + "Tags": { + "target": "com.amazonaws.fsx#Tags" + }, + "AdministrativeActions": { + "target": "com.amazonaws.fsx#AdministrativeActions", + "traits": { + "smithy.api#documentation": "

                                                                      A list of administrative actions for the file system that are in process or waiting to\n be processed. Administrative actions describe changes to the Amazon FSx\n system.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A snapshot of an Amazon FSx for OpenZFS volume.

                                                                      " + } + }, + "com.amazonaws.fsx#SnapshotFilter": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.fsx#SnapshotFilterName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the filter to use. You can filter by the file-system-id or by\n volume-id.

                                                                      " + } + }, + "Values": { + "target": "com.amazonaws.fsx#SnapshotFilterValues", + "traits": { + "smithy.api#documentation": "

                                                                      The file-system-id or volume-id that you are filtering\n for.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A filter used to restrict the results of DescribeSnapshots calls. You can\n use multiple filters to return results that meet all applied filter requirements.

                                                                      " + } + }, + "com.amazonaws.fsx#SnapshotFilterName": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "file-system-id", + "name": "FILE_SYSTEM_ID" + }, + { + "value": "volume-id", + "name": "VOLUME_ID" + } + ] + } + }, + "com.amazonaws.fsx#SnapshotFilterValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[0-9a-zA-Z\\*\\.\\\\/\\?\\-\\_]*$" + } + }, + "com.amazonaws.fsx#SnapshotFilterValues": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#SnapshotFilterValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 20 + } + } + }, + "com.amazonaws.fsx#SnapshotFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#SnapshotFilter" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2 + } + } + }, + "com.amazonaws.fsx#SnapshotId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 11, + "max": 28 + }, + "smithy.api#pattern": "^((fs)?volsnap-[0-9a-f]{8,})$" + } + }, + "com.amazonaws.fsx#SnapshotIds": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#SnapshotId" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.fsx#SnapshotLifecycle": { "type": "string", "traits": { - "smithy.api#documentation": "

                                                                      The types of limits on your service utilization. Limits include file system count,\n total throughput capacity, total storage, and total user-initiated backups. These limits\n apply for a specific account in a specific Amazon Web Services Region. You can increase some of them by\n contacting Amazon Web Services Support.

                                                                      ", "smithy.api#enum": [ { - "value": "FILE_SYSTEM_COUNT", - "name": "FILE_SYSTEM_COUNT" - }, - { - "value": "TOTAL_THROUGHPUT_CAPACITY", - "name": "TOTAL_THROUGHPUT_CAPACITY" - }, - { - "value": "TOTAL_STORAGE", - "name": "TOTAL_STORAGE" - }, - { - "value": "TOTAL_USER_INITIATED_BACKUPS", - "name": "TOTAL_USER_INITIATED_BACKUPS" - }, - { - "value": "TOTAL_USER_TAGS", - "name": "TOTAL_USER_TAGS" - }, - { - "value": "TOTAL_IN_PROGRESS_COPY_BACKUPS", - "name": "TOTAL_IN_PROGRESS_COPY_BACKUPS" + "value": "PENDING", + "name": "PENDING" }, { - "value": "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM", - "name": "STORAGE_VIRTUAL_MACHINES_PER_FILE_SYSTEM" + "value": "CREATING", + "name": "CREATING" }, { - "value": "VOLUMES_PER_FILE_SYSTEM", - "name": "VOLUMES_PER_FILE_SYSTEM" + "value": "DELETING", + "name": "DELETING" }, { - "value": "TOTAL_SSD_IOPS", - "name": "TOTAL_SSD_IOPS" + "value": "AVAILABLE", + "name": "AVAILABLE" } ] } }, - "com.amazonaws.fsx#ServiceLimitExceeded": { + "com.amazonaws.fsx#SnapshotName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 203 + }, + "smithy.api#pattern": "^[a-zA-Z0-9_:.-]{1,203}$" + } + }, + "com.amazonaws.fsx#SnapshotNotFound": { "type": "structure", "members": { - "Limit": { - "target": "com.amazonaws.fsx#ServiceLimit", - "traits": { - "smithy.api#documentation": "

                                                                      Enumeration of the service limit that was exceeded.

                                                                      ", - "smithy.api#required": {} - } - }, "Message": { "target": "com.amazonaws.fsx#ErrorMessage" } }, "traits": { - "smithy.api#documentation": "

                                                                      An error indicating that a particular service limit was exceeded. You can increase\n some service limits by contacting Amazon Web Services Support.

                                                                      ", + "smithy.api#documentation": "

                                                                      No Amazon FSx snapshots were found based on the supplied parameters.

                                                                      ", "smithy.api#error": "client" } }, + "com.amazonaws.fsx#Snapshots": { + "type": "list", + "member": { + "target": "com.amazonaws.fsx#Snapshot" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, "com.amazonaws.fsx#SourceBackupId": { "type": "string", "traits": { @@ -4894,7 +6692,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The request was rejected because the lifecycle status of the \n source backup is not AVAILABLE.

                                                                      ", + "smithy.api#documentation": "

                                                                      The request was rejected because the lifecycle status of the source backup isn't\n AVAILABLE.

                                                                      ", "smithy.api#error": "client" } }, @@ -5226,7 +7024,7 @@ "com.amazonaws.fsx#SubnetId": { "type": "string", "traits": { - "smithy.api#documentation": "

                                                                      The ID for a subnet. A subnet is a range of IP addresses in\n your virtual private cloud (VPC). For more information, see VPC and Subnets in the\n Amazon VPC User Guide.\n

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID for a subnet. A subnet is a range of IP addresses in\n your virtual private cloud (VPC). For more information, see VPC and subnets in the\n Amazon VPC User Guide.\n

                                                                      ", "smithy.api#length": { "min": 15, "max": 24 @@ -5600,6 +7398,77 @@ "smithy.api#documentation": "

                                                                      The response object for UntagResource action.

                                                                      " } }, + "com.amazonaws.fsx#UpdateDataRepositoryAssociation": { + "type": "operation", + "input": { + "target": "com.amazonaws.fsx#UpdateDataRepositoryAssociationRequest" + }, + "output": { + "target": "com.amazonaws.fsx#UpdateDataRepositoryAssociationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.fsx#BadRequest" + }, + { + "target": "com.amazonaws.fsx#DataRepositoryAssociationNotFound" + }, + { + "target": "com.amazonaws.fsx#IncompatibleParameterError" + }, + { + "target": "com.amazonaws.fsx#InternalServerError" + }, + { + "target": "com.amazonaws.fsx#ServiceLimitExceeded" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates the configuration of an existing data repository association\n on an Amazon FSx for Lustre file system. Data repository associations are\n supported only for file systems with the Persistent_2 deployment type.

                                                                      ", + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.fsx#UpdateDataRepositoryAssociationRequest": { + "type": "structure", + "members": { + "AssociationId": { + "target": "com.amazonaws.fsx#DataRepositoryAssociationId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the data repository association that you are updating.

                                                                      ", + "smithy.api#required": {} + } + }, + "ClientRequestToken": { + "target": "com.amazonaws.fsx#ClientRequestToken", + "traits": { + "smithy.api#idempotencyToken": {} + } + }, + "ImportedFileChunkSize": { + "target": "com.amazonaws.fsx#Megabytes", + "traits": { + "smithy.api#documentation": "

                                                                      For files imported from a data repository, this value determines the stripe count and\n maximum amount of data per file (in MiB) stored on a single physical disk. The maximum\n number of disks that a single file can be striped across is limited by the total number\n of disks that make up the file system.


                                                                      The default chunk size is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500\n GiB). Amazon S3 objects have a maximum size of 5 TB.

                                                                      " + } + }, + "S3": { + "target": "com.amazonaws.fsx#S3DataRepositoryConfiguration", + "traits": { + "smithy.api#documentation": "

The configuration for an Amazon S3 data repository linked to an Amazon FSx for Lustre file system with a data repository association. The configuration defines which file events (new, changed, or deleted files or directories) are automatically imported from the linked data repository to the file system or automatically exported from the file system to the data repository.

                                                                      " + } + } + } + }, + "com.amazonaws.fsx#UpdateDataRepositoryAssociationResponse": { + "type": "structure", + "members": { + "Association": { + "target": "com.amazonaws.fsx#DataRepositoryAssociation", + "traits": { + "smithy.api#documentation": "

                                                                      The response object returned after the data repository association is updated.

                                                                      " + } + } + } + }, "com.amazonaws.fsx#UpdateFileSystem": { "type": "operation", "input": { @@ -5632,7 +7501,7 @@ } ], "traits": { - "smithy.api#documentation": "

    "com.amazonaws.fsx#UpdateFileSystem": {
      "type": "operation",
      "input": {
@@ -5632,7 +7501,7 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request. For Amazon FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, SelfManagedActiveDirectoryConfiguration, StorageCapacity, ThroughputCapacity, WeeklyMaintenanceStartTime. For Amazon FSx for Lustre file systems, you can update the following properties: AutoImportPolicy, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, DataCompressionType, StorageCapacity, WeeklyMaintenanceStartTime. For Amazon FSx for NetApp ONTAP file systems, you can update the following properties: AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, FsxAdminPassword, WeeklyMaintenanceStartTime."
+        "smithy.api#documentation": "Use this operation to update the configuration of an existing Amazon FSx file system. You can update multiple properties in a single request. For Amazon FSx for Windows File Server file systems, you can update the following properties: AuditLogConfiguration, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, SelfManagedActiveDirectoryConfiguration, StorageCapacity, ThroughputCapacity, WeeklyMaintenanceStartTime. For FSx for Lustre file systems, you can update the following properties: AutoImportPolicy, AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, DataCompressionType, StorageCapacity, WeeklyMaintenanceStartTime. For FSx for ONTAP file systems, you can update the following properties: AutomaticBackupRetentionDays, DailyAutomaticBackupStartTime, FsxAdminPassword, WeeklyMaintenanceStartTime. For the Amazon FSx for OpenZFS file systems, you can update the following properties: AutomaticBackupRetentionDays, CopyTagsToBackups, CopyTagsToVolumes, DailyAutomaticBackupStartTime, DiskIopsConfiguration, ThroughputCapacity, WeeklyMaintenanceStartTime."
      }
    },

    "com.amazonaws.fsx#UpdateFileSystemLustreConfiguration": {
@@ -5653,7 +7522,7 @@
        "AutoImportPolicy": {
          "target": "com.amazonaws.fsx#AutoImportPolicyType",
          "traits": {
-            "smithy.api#documentation": "(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listing up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values: NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update the file and directory listing for any new or changed objects after choosing this option. NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option. For more information, see Automatically import updates from your S3 bucket."
+            "smithy.api#documentation": "(Optional) When you create your file system, your existing S3 objects appear as file and directory listings. Use this property to choose how Amazon FSx keeps your file and directory listing up to date as you add or modify objects in your linked S3 bucket. AutoImportPolicy can have the following values: NONE - (Default) AutoImport is off. Amazon FSx only updates file and directory listings from the linked S3 bucket when the file system is created. FSx does not update the file and directory listing for any new or changed objects after choosing this option. NEW - AutoImport is on. Amazon FSx automatically imports directory listings of any new objects added to the linked S3 bucket that do not currently exist in the FSx file system. NEW_CHANGED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket and any existing objects that are changed in the S3 bucket after you choose this option. NEW_CHANGED_DELETED - AutoImport is on. Amazon FSx automatically imports file and directory listings of any new objects added to the S3 bucket, any existing objects that are changed in the S3 bucket, and any objects that were deleted in the S3 bucket. The AutoImportPolicy parameter is not supported for Lustre file systems with the Persistent_2 deployment type. Instead, use UpdateDataRepositoryAssociation to update a data repository association on your Persistent_2 file system."
          }
        },

        "DataCompressionType": {
@@ -5661,6 +7530,12 @@
          "traits": {
            "smithy.api#documentation": "Sets the data compression configuration for the file system. DataCompressionType can have the following values: NONE - Data compression is turned off for the file system. LZ4 - Data compression is turned on with the LZ4 algorithm. If you don't use DataCompressionType, the file system retains its current data compression configuration. For more information, see Lustre data compression."
          }
+        },
+        "LogConfiguration": {
+          "target": "com.amazonaws.fsx#LustreLogCreateConfiguration",
+          "traits": {
+            "smithy.api#documentation": "The Lustre logging configuration used when updating an Amazon FSx for Lustre file system. When logging is enabled, Lustre logs error and warning events for data repositories associated with your file system to Amazon CloudWatch Logs."
+          }
+        }
      },

      "traits": {
@@ -5690,33 +7565,71 @@
        "smithy.api#documentation": "The configuration updates for an Amazon FSx for NetApp ONTAP file system."
      }
    },
+    "com.amazonaws.fsx#UpdateFileSystemOpenZFSConfiguration": {
+      "type": "structure",
+      "members": {
+        "AutomaticBackupRetentionDays": {
+          "target": "com.amazonaws.fsx#AutomaticBackupRetentionDays"
+        },
+        "CopyTagsToBackups": {
+          "target": "com.amazonaws.fsx#Flag",
+          "traits": {
+            "smithy.api#documentation": "A Boolean value indicating whether tags for the file system should be copied to backups. This value defaults to false. If it's set to true, all tags for the file system are copied to all automatic and user-initiated backups where the user doesn't specify tags. If this value is true and you specify one or more tags, only the specified tags are copied to backups. If you specify one or more tags when creating a user-initiated backup, no tags are copied from the file system, regardless of this value."
+          }
+        },
+        "CopyTagsToVolumes": {
+          "target": "com.amazonaws.fsx#Flag",
+          "traits": {
+            "smithy.api#documentation": "A Boolean value indicating whether tags for the volume should be copied to snapshots. This value defaults to false. If it's set to true, all tags for the volume are copied to snapshots where the user doesn't specify tags. If this value is true and you specify one or more tags, only the specified tags are copied to snapshots. If you specify one or more tags when creating the snapshot, no tags are copied from the volume, regardless of this value."
+          }
+        },
+        "DailyAutomaticBackupStartTime": {
+          "target": "com.amazonaws.fsx#DailyTime"
+        },
+        "ThroughputCapacity": {
+          "target": "com.amazonaws.fsx#MegabytesPerSecond",
+          "traits": {
+            "smithy.api#documentation": "The throughput of an Amazon FSx file system, measured in megabytes per second (MBps), in 2 to the nth increments, between 2^3 (8) and 2^11 (2048)."
+          }
+        },
+        "WeeklyMaintenanceStartTime": {
+          "target": "com.amazonaws.fsx#WeeklyTime"
+        },
+        "DiskIopsConfiguration": {
+          "target": "com.amazonaws.fsx#DiskIopsConfiguration"
+        }
+      },
+      "traits": {
+        "smithy.api#documentation": "The configuration updates for an Amazon FSx for OpenZFS file system."
+      }
+    },

    "com.amazonaws.fsx#UpdateFileSystemRequest": {
      "type": "structure",
      "members": {
        "FileSystemId": {
          "target": "com.amazonaws.fsx#FileSystemId",
          "traits": {
-            "smithy.api#documentation": "Identifies the file system that you are updating.",
+            "smithy.api#documentation": "The ID of the file system that you are updating.",
            "smithy.api#required": {}
          }
        },
        "ClientRequestToken": {
          "target": "com.amazonaws.fsx#ClientRequestToken",
          "traits": {
-            "smithy.api#documentation": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.",
+            "smithy.api#documentation": "A string of up to 64 ASCII characters that Amazon FSx uses to ensure idempotent updates. This string is automatically filled on your behalf when you use the Command Line Interface (CLI) or an Amazon Web Services SDK.",
            "smithy.api#idempotencyToken": {}
          }
        },
        "StorageCapacity": {
          "target": "com.amazonaws.fsx#StorageCapacity",
          "traits": {
-            "smithy.api#documentation": "Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server or Amazon FSx for Lustre file system. Specifies the storage capacity target value, GiB, to increase the storage capacity for the file system that you're updating. You cannot make a storage capacity increase request if there is an existing storage capacity increase request in progress. For Windows file systems, the storage capacity target value must be at least 10 percent (%) greater than the current storage capacity value. In order to increase storage capacity, the file system must have at least 16 MB/s of throughput capacity. For Lustre file systems, the storage capacity target value can be the following: For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity. For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12 MB/s/TiB file systems and multiples of 1800 GiB for 40 MB/s/TiB file systems. The values must be greater than the current storage capacity. For SCRATCH_1 file systems, you cannot increase the storage capacity. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide and Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide."
+            "smithy.api#documentation": "Use this parameter to increase the storage capacity of an Amazon FSx for Windows File Server or Amazon FSx for Lustre file system. Specifies the storage capacity target value, in GiB, to increase the storage capacity for the file system that you're updating. You can't make a storage capacity increase request if there is an existing storage capacity increase request in progress. For Windows file systems, the storage capacity target value must be at least 10 percent greater than the current storage capacity value. To increase storage capacity, the file system must have at least 16 MBps of throughput capacity. For Lustre file systems, the storage capacity target value can be the following: For SCRATCH_2 and PERSISTENT_1 SSD deployment types, valid values are in multiples of 2400 GiB. The value must be greater than the current storage capacity. For PERSISTENT HDD file systems, valid values are multiples of 6000 GiB for 12-MBps throughput per TiB file systems and multiples of 1800 GiB for 40-MBps throughput per TiB file systems. The values must be greater than the current storage capacity. For SCRATCH_1 file systems, you can't increase the storage capacity. For OpenZFS file systems, the input/output operations per second (IOPS) automatically scale with increases to the storage capacity if IOPS is configured for automatic scaling. If the storage capacity increase would result in less than 3 IOPS per GiB of storage, this operation returns an error. For more information, see Managing storage capacity in the Amazon FSx for Windows File Server User Guide, Managing storage and throughput capacity in the Amazon FSx for Lustre User Guide, and Managing storage capacity in the Amazon FSx for OpenZFS User Guide."
          }
        },
        "WindowsConfiguration": {
          "target": "com.amazonaws.fsx#UpdateFileSystemWindowsConfiguration",
          "traits": {
-            "smithy.api#documentation": "The configuration updates for an Amazon FSx for Windows File Server file system."
+            "smithy.api#documentation": "The configuration updates for an Amazon FSx for Windows File Server file system."
          }
        },
        "LustreConfiguration": {
@@ -5724,6 +7637,12 @@
        },
        "OntapConfiguration": {
          "target": "com.amazonaws.fsx#UpdateFileSystemOntapConfiguration"
+        },
+        "OpenZFSConfiguration": {
+          "target": "com.amazonaws.fsx#UpdateFileSystemOpenZFSConfiguration",
+          "traits": {
+            "smithy.api#documentation": "The configuration updates for an Amazon FSx for OpenZFS file system."
+          }
+        }
      },

      "traits": {
@@ -5826,6 +7745,110 @@
        "smithy.api#documentation": "Used to specify changes to the ONTAP configuration for the volume you are updating."
      }
    },
+    "com.amazonaws.fsx#UpdateOpenZFSVolumeConfiguration": {
+      "type": "structure",
+      "members": {
+        "StorageCapacityReservationGiB": {
+          "target": "com.amazonaws.fsx#IntegerNoMax",
+          "traits": {
+            "smithy.api#documentation": "The amount of storage in gibibytes (GiB) to reserve from the parent volume. You can't reserve more storage than the parent volume has reserved."
+          }
+        },
+        "StorageCapacityQuotaGiB": {
+          "target": "com.amazonaws.fsx#IntegerNoMax",
+          "traits": {
+            "smithy.api#documentation": "The maximum amount of storage in gibibytes (GiB) that the volume can use from its parent. You can specify a quota larger than the storage on the parent volume."
+          }
+        },
+        "DataCompressionType": {
+          "target": "com.amazonaws.fsx#OpenZFSDataCompressionType",
+          "traits": {
+            "smithy.api#documentation": "Specifies the method used to compress the data on the volume. Unless the compression type is specified, volumes inherit the DataCompressionType value of their parent volume. NONE - Doesn't compress the data on the volume. ZSTD - Compresses the data in the volume using the Zstandard (ZSTD) compression algorithm. This algorithm reduces the amount of space used on your volume and has very little impact on compute resources."
+          }
+        },
+        "NfsExports": {
+          "target": "com.amazonaws.fsx#OpenZFSNfsExports",
+          "traits": {
+            "smithy.api#documentation": "The configuration object for mounting a Network File System (NFS) file system."
+          }
+        },
+        "UserAndGroupQuotas": {
+          "target": "com.amazonaws.fsx#OpenZFSUserAndGroupQuotas",
+          "traits": {
+            "smithy.api#documentation": "An object specifying how much storage users or groups can use on the volume."
+          }
+        },
+        "ReadOnly": {
+          "target": "com.amazonaws.fsx#ReadOnly",
+          "traits": {
+            "smithy.api#documentation": "A Boolean value indicating whether the volume is read-only."
+          }
+        }
+      },
+      "traits": {
+        "smithy.api#documentation": "Used to specify changes to the OpenZFS configuration for the volume that you are updating."
+      }
+    },

+    "com.amazonaws.fsx#UpdateSnapshot": {
+      "type": "operation",
+      "input": { "target": "com.amazonaws.fsx#UpdateSnapshotRequest" },
+      "output": { "target": "com.amazonaws.fsx#UpdateSnapshotResponse" },
+      "errors": [
+        { "target": "com.amazonaws.fsx#BadRequest" },
+        { "target": "com.amazonaws.fsx#InternalServerError" },
+        { "target": "com.amazonaws.fsx#SnapshotNotFound" }
+      ],
+      "traits": {
+        "smithy.api#documentation": "Updates the name of a snapshot.",
+        "smithy.api#idempotent": {}
+      }
+    },
+    "com.amazonaws.fsx#UpdateSnapshotRequest": {
+      "type": "structure",
+      "members": {
+        "ClientRequestToken": {
+          "target": "com.amazonaws.fsx#ClientRequestToken",
+          "traits": { "smithy.api#idempotencyToken": {} }
+        },
+        "Name": {
+          "target": "com.amazonaws.fsx#SnapshotName",
+          "traits": {
+            "smithy.api#documentation": "The name of the snapshot to update.",
+            "smithy.api#required": {}
+          }
+        },
+        "SnapshotId": {
+          "target": "com.amazonaws.fsx#SnapshotId",
+          "traits": {
+            "smithy.api#documentation": "The ID of the snapshot that you want to update, in the format fsvolsnap-0123456789abcdef0.",
+            "smithy.api#required": {}
+          }
+        }
+      }
+    },
+    "com.amazonaws.fsx#UpdateSnapshotResponse": {
+      "type": "structure",
+      "members": {
+        "Snapshot": {
+          "target": "com.amazonaws.fsx#Snapshot",
+          "traits": {
+            "smithy.api#documentation": "Returned after a successful UpdateSnapshot operation, describing the snapshot that you updated."
+          }
+        }
+      }
+    },

    "com.amazonaws.fsx#UpdateStorageVirtualMachine": {
      "type": "operation",
      "input": {
@@ -5901,7 +7924,7 @@
        }
      },
      "traits": {
-        "smithy.api#documentation": "Updates the Microsoft Active Directory (AD) configuration of an SVM joined to an AD. Pleae note, account credentials are not returned in the response payload."
+        "smithy.api#documentation": "Updates the Microsoft Active Directory (AD) configuration of an SVM joined to an AD. Please note, account credentials are not returned in the response payload."
      }
    },

    "com.amazonaws.fsx#UpdateVolume": {
      "type": "operation",
      "input": {
@@ -5930,7 +7953,7 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "Updates an Amazon FSx for NetApp ONTAP volume's configuration."
+        "smithy.api#documentation": "Updates the configuration of an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume."
      }
    },

    "com.amazonaws.fsx#UpdateVolumeRequest": {
@@ -5945,14 +7968,26 @@
        "VolumeId": {
          "target": "com.amazonaws.fsx#VolumeId",
          "traits": {
-            "smithy.api#documentation": "Specifies the volume that you want to update, formatted fsvol-0123456789abcdef0.",
+            "smithy.api#documentation": "The ID of the volume that you want to update, in the format fsvol-0123456789abcdef0.",
            "smithy.api#required": {}
          }
        },
        "OntapConfiguration": {
          "target": "com.amazonaws.fsx#UpdateOntapVolumeConfiguration",
          "traits": {
-            "smithy.api#documentation": "The ONTAP configuration of the volume you are updating."
+            "smithy.api#documentation": "The configuration of the ONTAP volume that you are updating."
          }
+        },
+        "Name": {
+          "target": "com.amazonaws.fsx#VolumeName",
+          "traits": {
+            "smithy.api#documentation": "The name of the OpenZFS volume. OpenZFS root volumes are automatically named FSX. Child volume names must be unique among their parent volume's children. The name of the volume is part of the mount string for the OpenZFS volume."
+          }
+        },
+        "OpenZFSConfiguration": {
+          "target": "com.amazonaws.fsx#UpdateOpenZFSVolumeConfiguration",
+          "traits": {
+            "smithy.api#documentation": "The configuration of the OpenZFS volume that you are updating."
+          }
+        }
      }
    },

@@ -5963,7 +7998,7 @@
        "Volume": {
          "target": "com.amazonaws.fsx#Volume",
          "traits": {
-            "smithy.api#documentation": "Returned after a successful UpdateVolume API operation, describing the volume just updated."
+            "smithy.api#documentation": "A description of the volume just updated. Returned after a successful UpdateVolume API operation."
          }
        }
      }
    },

@@ -5980,7 +8015,7 @@
        "Lifecycle": {
          "target": "com.amazonaws.fsx#VolumeLifecycle",
          "traits": {
-            "smithy.api#documentation": "The lifecycle status of the volume. CREATED - The volume is fully available for use. CREATING - Amazon FSx is creating the new volume. DELETING - Amazon FSx is deleting an existing volume. FAILED - Amazon FSx was unable to create the volume. MISCONFIGURED - The volume is in a failed but recoverable state. PENDING - Amazon FSx has not started creating the volume."
+            "smithy.api#documentation": "The lifecycle status of the volume. AVAILABLE - The volume is fully available for use. CREATED - The volume has been created. CREATING - Amazon FSx is creating the new volume. DELETING - Amazon FSx is deleting an existing volume. FAILED - Amazon FSx was unable to create the volume. MISCONFIGURED - The volume is in a failed but recoverable state. PENDING - Amazon FSx hasn't started creating the volume."
          }
        },

        "Name": {
@@ -6007,18 +8042,30 @@
        "VolumeType": {
          "target": "com.amazonaws.fsx#VolumeType",
          "traits": {
-            "smithy.api#documentation": "The type of volume; ONTAP is the only valid volume type."
+            "smithy.api#documentation": "The type of the volume."
          }
        },
        "LifecycleTransitionReason": {
          "target": "com.amazonaws.fsx#LifecycleTransitionReason",
          "traits": {
-            "smithy.api#documentation": "Describes why the volume lifecycle state changed."
+            "smithy.api#documentation": "The reason why the volume lifecycle status changed."
          }
+        },
+        "AdministrativeActions": {
+          "target": "com.amazonaws.fsx#AdministrativeActions",
+          "traits": {
+            "smithy.api#documentation": "A list of administrative actions for the file system that are in process or waiting to be processed. Administrative actions describe changes to the Amazon FSx system that you initiated."
+          }
+        },
+        "OpenZFSConfiguration": {
+          "target": "com.amazonaws.fsx#OpenZFSVolumeConfiguration",
+          "traits": {
+            "smithy.api#documentation": "The configuration of an Amazon FSx for OpenZFS volume."
+          }
+        }
      },
      "traits": {
-        "smithy.api#documentation": "Describes an Amazon FSx for NetApp ONTAP volume."
+        "smithy.api#documentation": "Describes an Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volume."
      }
    },
    "com.amazonaws.fsx#VolumeCapacity": {
@@ -6048,7 +8095,7 @@
      },
      "traits": {
-        "smithy.api#documentation": "A filter used to restrict the results of describe calls for Amazon FSx for NetApp ONTAP volumes. You can use multiple filters to return results that meet all applied filter requirements."
+        "smithy.api#documentation": "A filter used to restrict the results of describe calls for Amazon FSx for NetApp ONTAP or Amazon FSx for OpenZFS volumes. You can use multiple filters to return results that meet all applied filter requirements."
      }
    },
    "com.amazonaws.fsx#VolumeFilterName": {
@@ -6149,6 +8196,10 @@
        {
          "value": "PENDING",
          "name": "PENDING"
+        },
+        {
+          "value": "AVAILABLE",
+          "name": "AVAILABLE"
        }
      ]
    }
  },
@@ -6175,6 +8226,16 @@
      "smithy.api#error": "client"
    }
  },
+  "com.amazonaws.fsx#VolumePath": {
+    "type": "string",
+    "traits": {
+      "smithy.api#length": {
+        "min": 1,
+        "max": 2048
+      },
+      "smithy.api#pattern": "^[A-za-z0-9\\_\\.\\:\\-\\/]*$"
+    }
+  },
  "com.amazonaws.fsx#VolumeType": {
    "type": "string",
    "traits": {
@@ -6182,6 +8243,10 @@
        {
          "value": "ONTAP",
          "name": "ONTAP"
+        },
+        {
+          "value": "OPENZFS",
+          "name": "OPENZFS"
        }
      ]
    }
  },

                                                                      The ID of your virtual private cloud (VPC). For more information, see VPC and\n Subnets in the Amazon VPC User Guide.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID of your virtual private cloud (VPC). For more information, see VPC and\n subnets in the Amazon VPC User Guide.

                                                                      ", "smithy.api#length": { "min": 12, "max": 21 diff --git a/codegen/sdk-codegen/aws-models/glue.json b/codegen/sdk-codegen/aws-models/glue.json index 8c5b314d21ef..b23731d2523d 100644 --- a/codegen/sdk-codegen/aws-models/glue.json +++ b/codegen/sdk-codegen/aws-models/glue.json @@ -31,6 +31,21 @@ "shapes": { "com.amazonaws.glue#AWSGlue": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Glue", + "arnNamespace": "glue", + "cloudFormationName": "Glue", + "cloudTrailEventSource": "glue.amazonaws.com", + "endpointPrefix": "glue" + }, + "aws.auth#sigv4": { + "name": "glue" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "Glue\n

diff --git a/codegen/sdk-codegen/aws-models/glue.json b/codegen/sdk-codegen/aws-models/glue.json index 8c5b314d21ef..b23731d2523d 100644 --- a/codegen/sdk-codegen/aws-models/glue.json +++ b/codegen/sdk-codegen/aws-models/glue.json
@@ -31,6 +31,21 @@ "shapes": { "com.amazonaws.glue#AWSGlue": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Glue", + "arnNamespace": "glue", + "cloudFormationName": "Glue", + "cloudTrailEventSource": "glue.amazonaws.com", + "endpointPrefix": "glue" + }, + "aws.auth#sigv4": { + "name": "glue" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "Glue\n Defines the public endpoint for the Glue service.", + "smithy.api#title": "AWS Glue" + }, "version": "2017-03-31", "operations": [ {
@@ -531,22 +546,7 @@ { "target": "com.amazonaws.glue#UpdateWorkflow" } - ], - "traits": { - "aws.api#service": { - "sdkId": "Glue", - "arnNamespace": "glue", - "cloudFormationName": "Glue", - "cloudTrailEventSource": "glue.amazonaws.com", - "endpointPrefix": "glue" - }, - "aws.auth#sigv4": { - "name": "glue" - }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Glue\n Defines the public endpoint for the Glue service.", - "smithy.api#title": "AWS Glue" - } + ] },
"com.amazonaws.glue#AccessDeniedException": { "type": "structure", @@ -925,6 +925,9 @@ { "target": "com.amazonaws.glue#EntityNotFoundException" }, + { + "target": "com.amazonaws.glue#GlueEncryptionException" + }, { "target": "com.amazonaws.glue#InternalServiceException" }, @@ -933,6 +936,9 @@ }, { "target": "com.amazonaws.glue#OperationTimeoutException" + }, + { + "target": "com.amazonaws.glue#ResourceNotReadyException" + } ], "traits": {
@@ -973,6 +979,12 @@ "smithy.api#documentation": "A list of the tables to delete.", "smithy.api#required": {} } + }, + "TransactionId": { + "target": "com.amazonaws.glue#TransactionIdString", + "traits": { + "smithy.api#documentation": "The transaction ID at which to delete the table contents." + } + } } } },
@@ -1321,6 +1333,9 @@ { "target": "com.amazonaws.glue#InvalidInputException" }, + { + "target": "com.amazonaws.glue#InvalidStateException" + }, { "target": "com.amazonaws.glue#OperationTimeoutException" } @@ -5393,6 +5408,9 @@ { "target": "com.amazonaws.glue#OperationTimeoutException" }, + { + "target": "com.amazonaws.glue#ResourceNotReadyException" + }, { "target": "com.amazonaws.glue#ResourceNumberLimitExceededException" } @@ -5429,6 +5447,12 @@ "traits": { "smithy.api#documentation": "A list of partition indexes, PartitionIndex structures, to create in the table." } + }, + "TransactionId": { + "target": "com.amazonaws.glue#TransactionIdString", + "traits": { + "smithy.api#documentation": "The ID of the transaction." + } + } } } },

@@ -7090,6 +7114,9 @@ }, { "target": "com.amazonaws.glue#OperationTimeoutException" + }, + { + "target": "com.amazonaws.glue#ResourceNotReadyException" + } ], "traits": {
@@ -7118,6 +7145,12 @@ "smithy.api#documentation": "The name of the table to be deleted. For Hive\n compatibility, this name is entirely lowercase.", "smithy.api#required": {} } + }, + "TransactionId": { + "target": "com.amazonaws.glue#TransactionIdString", + "traits": { + "smithy.api#documentation": "The transaction ID at which to delete the table contents." + } + } } } },
@@ -10188,8 +10221,14 @@ { "target": "com.amazonaws.glue#InvalidInputException" }, + { + "target": "com.amazonaws.glue#InvalidStateException" + }, { "target": "com.amazonaws.glue#OperationTimeoutException" + }, + { + "target": "com.amazonaws.glue#ResourceNotReadyException" + } ], "traits": {
@@ -10253,6 +10292,18 @@ "traits": { "smithy.api#documentation": "When true, specifies not returning the partition column schema. Useful when you are interested only in other partition attributes such as partition values or location. This approach avoids the problem of a large response by not returning duplicate data." } + }, + "TransactionId": { + "target": "com.amazonaws.glue#TransactionIdString", + "traits": { + "smithy.api#documentation": "The transaction ID at which to read the partition contents." + } + }, + "QueryAsOfTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The time as of when to read the partition contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId." + } + } } } },
@@ -11078,6 +11129,9 @@ }, { "target": "com.amazonaws.glue#OperationTimeoutException" + }, + { + "target": "com.amazonaws.glue#ResourceNotReadyException" + } ], "traits": {

@@ -11106,6 +11160,18 @@ "smithy.api#documentation": "The name of the table for which to retrieve the definition. For Hive\n compatibility, this name is entirely lowercase.", "smithy.api#required": {} } + }, + "TransactionId": { + "target": "com.amazonaws.glue#TransactionIdString", + "traits": { + "smithy.api#documentation": "The transaction ID at which to read the table contents." + } + }, + "QueryAsOfTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId." + } + } } } },
@@ -11352,6 +11418,18 @@ "traits": { "smithy.api#documentation": "The maximum number of tables to return in a single response." } + }, + "TransactionId": { + "target": "com.amazonaws.glue#TransactionIdString", + "traits": { + "smithy.api#documentation": "The transaction ID at which to read the table contents." + } + }, + "QueryAsOfTime": { + "target": "com.amazonaws.glue#Timestamp", + "traits": { + "smithy.api#documentation": "The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId." + } + } } } },

@@ -12281,6 +12359,21 @@ "smithy.api#error": "client" + } + },
+ "com.amazonaws.glue#InvalidStateException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.glue#MessageString", + "traits": { + "smithy.api#documentation": "A message describing the problem." + } + } + }, + "traits": { + "smithy.api#documentation": "An error that indicates your data is in an invalid state.", + "smithy.api#error": "client" + } + },
"com.amazonaws.glue#IsVersionValid": { "type": "boolean" }, @@ -15927,6 +16020,21 @@ } } },
+ "com.amazonaws.glue#ResourceNotReadyException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.glue#MessageString", + "traits": { + "smithy.api#documentation": "A message describing the problem." + } + } + }, + "traits": { + "smithy.api#documentation": "A resource was not ready for a transaction.", + "smithy.api#error": "client" + } + },
"com.amazonaws.glue#ResourceNumberLimitExceededException": { "type": "structure", "members": { @@ -18574,6 +18682,16 @@ } } },
+ "com.amazonaws.glue#TransactionIdString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[\\p{L}\\p{N}\\p{P}]*$" + } + },
"com.amazonaws.glue#TransformEncryption": { "type": "structure", "members": { @@ -20247,6 +20365,9 @@ { "target": "com.amazonaws.glue#OperationTimeoutException" }, + { + "target": "com.amazonaws.glue#ResourceNotReadyException" + }, { "target": "com.amazonaws.glue#ResourceNumberLimitExceededException" } @@ -20283,6 +20404,12 @@ "traits": { "smithy.api#documentation": "By default, UpdateTable always creates an archived version of the table\n before updating it. However, if skipArchive is set to true,\n UpdateTable does not create the archived version." } + }, + "TransactionId": { + "target": "com.amazonaws.glue#TransactionIdString", + "traits": { + "smithy.api#documentation": "The transaction ID at which to update the table contents." + } + } } } },

diff --git a/codegen/sdk-codegen/aws-models/inspector2.json b/codegen/sdk-codegen/aws-models/inspector2.json new file mode 100644 index 000000000000..709520d79f62 --- /dev/null +++ b/codegen/sdk-codegen/aws-models/inspector2.json
@@ -0,0 +1,6947 @@ +{ + "smithy": "1.0", + "shapes": {
+ "com.amazonaws.inspector2#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "You do not have sufficient access to perform this action.", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + },
+ "com.amazonaws.inspector2#Account": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "The ID of the Amazon Web Services account.", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.inspector2#Status", + "traits": { + "smithy.api#documentation": "The status of Amazon Inspector for the account.", + "smithy.api#required": {} + } + }, + "resourceStatus": { + "target": "com.amazonaws.inspector2#ResourceStatus", + "traits": { + "smithy.api#documentation": "Details of the status of Amazon Inspector scans by resource type.", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "An Amazon Web Services account within your environment that Amazon Inspector has been enabled for." + } + },
+ "com.amazonaws.inspector2#AccountAggregation": { + "type": "structure", + "members": { + "findingType": { + "target": "com.amazonaws.inspector2#AggregationFindingType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of finding.

                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.inspector2#AggregationResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of resource.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

                                                                      The sort order (ascending or descending).

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#AccountSortBy", + "traits": { + "smithy.api#documentation": "

                                                                      The value to sort by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on\n Amazon Web Services accounts.

                                                                      " + } + }, + "com.amazonaws.inspector2#AccountAggregationResponse": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID.

                                                                      " + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

                                                                      The number of findings by severity.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An aggregation of findings by Amazon Web Services account ID.

                                                                      " + } + }, + "com.amazonaws.inspector2#AccountId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 12 + }, + "smithy.api#pattern": "^\\d{12}$" + } + }, + "com.amazonaws.inspector2#AccountIdSet": { + "type": "set", + "member": { + "target": "com.amazonaws.inspector2#AccountId" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.inspector2#AccountList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Account" + } + }, + "com.amazonaws.inspector2#AccountSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + } + ] + } + }, + "com.amazonaws.inspector2#AccountState": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "state": { + "target": "com.amazonaws.inspector2#State", + "traits": { + "smithy.api#documentation": "

                                                                      An object detailing the status of Amazon Inspector for the account.

                                                                      ", + "smithy.api#required": {} + } + }, + "resourceState": { + "target": "com.amazonaws.inspector2#ResourceState", + "traits": { + "smithy.api#documentation": "

                                                                      An object detailing which resources Amazon Inspector is enabled to scan for the account.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object with details on the status of an Amazon Web Services account within your Amazon Inspector environment.

                                                                      " + } + }, + "com.amazonaws.inspector2#AccountStateList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#AccountState" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.inspector2#AggCounts": { + "type": "long" + }, + "com.amazonaws.inspector2#AggregationFindingType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NETWORK_REACHABILITY", + "name": "NETWORK_REACHABILITY" + }, + { + "value": "PACKAGE_VULNERABILITY", + "name": "PACKAGE_VULNERABILITY" + } + ] + } + }, + "com.amazonaws.inspector2#AggregationRequest": { + "type": "union", + "members": { + "accountAggregation": { + "target": "com.amazonaws.inspector2#AccountAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on Amazon Web Services account\n IDs.

                                                                      " + } + }, + "amiAggregation": { + "target": "com.amazonaws.inspector2#AmiAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on Amazon Machine\n Images (AMIs).

                                                                      " + } + }, + "awsEcrContainerAggregation": { + "target": "com.amazonaws.inspector2#AwsEcrContainerAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on Amazon ECR container\n images.

                                                                      " + } + }, + "ec2InstanceAggregation": { + "target": "com.amazonaws.inspector2#Ec2InstanceAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on Amazon EC2\n instances.

                                                                      " + } + }, + "findingTypeAggregation": { + "target": "com.amazonaws.inspector2#FindingTypeAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on finding types.

                                                                      " + } + }, + "imageLayerAggregation": { + "target": "com.amazonaws.inspector2#ImageLayerAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on container image\n layers.

                                                                      " + } + }, + "packageAggregation": { + "target": "com.amazonaws.inspector2#PackageAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on operating system\n package type.

                                                                      " + } + }, + "repositoryAggregation": { + "target": "com.amazonaws.inspector2#RepositoryAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on Amazon ECR repositories.

                                                                      " + } + }, + "titleAggregation": { + "target": "com.amazonaws.inspector2#TitleAggregation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation request based on finding title.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an aggregation request.

                                                                      " + } + }, + "com.amazonaws.inspector2#AggregationResourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS_EC2_INSTANCE", + "name": "AWS_EC2_INSTANCE" + }, + { + "value": "AWS_ECR_CONTAINER_IMAGE", + "name": "AWS_ECR_CONTAINER_IMAGE" + } + ] + } + }, + "com.amazonaws.inspector2#AggregationResponse": { + "type": "union", + "members": { + "accountAggregation": { + "target": "com.amazonaws.inspector2#AccountAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on Amazon Web Services account\n IDs.

                                                                      " + } + }, + "amiAggregation": { + "target": "com.amazonaws.inspector2#AmiAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on Amazon Machine\n Images (AMIs).

                                                                      " + } + }, + "awsEcrContainerAggregation": { + "target": "com.amazonaws.inspector2#AwsEcrContainerAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on Amazon ECR container\n images.

                                                                      " + } + }, + "ec2InstanceAggregation": { + "target": "com.amazonaws.inspector2#Ec2InstanceAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on Amazon EC2\n instances.

                                                                      " + } + }, + "findingTypeAggregation": { + "target": "com.amazonaws.inspector2#FindingTypeAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on finding types.

                                                                      " + } + }, + "imageLayerAggregation": { + "target": "com.amazonaws.inspector2#ImageLayerAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on container image\n layers.

                                                                      " + } + }, + "packageAggregation": { + "target": "com.amazonaws.inspector2#PackageAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on operating system\n package type.

                                                                      " + } + }, + "repositoryAggregation": { + "target": "com.amazonaws.inspector2#RepositoryAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on Amazon ECR\n repositories.

                                                                      " + } + }, + "titleAggregation": { + "target": "com.amazonaws.inspector2#TitleAggregationResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about an aggregation response based on finding title.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains details about the results of an aggregation type.

                                                                      " + } + }, + "com.amazonaws.inspector2#AggregationResponseList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#AggregationResponse" + } + }, + "com.amazonaws.inspector2#AggregationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "FINDING_TYPE", + "name": "FINDING_TYPE" + }, + { + "value": "PACKAGE", + "name": "PACKAGE" + }, + { + "value": "TITLE", + "name": "TITLE" + }, + { + "value": "REPOSITORY", + "name": "REPOSITORY" + }, + { + "value": "AMI", + "name": "AMI" + }, + { + "value": "AWS_EC2_INSTANCE", + "name": "AWS_EC2_INSTANCE" + }, + { + "value": "AWS_ECR_CONTAINER", + "name": "AWS_ECR_CONTAINER" + }, + { + "value": "IMAGE_LAYER", + "name": "IMAGE_LAYER" + }, + { + "value": "ACCOUNT", + "name": "ACCOUNT" + } + ] + } + }, + "com.amazonaws.inspector2#AmiAggregation": { + "type": "structure", + "members": { + "amis": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The IDs of AMIs to aggregate findings for.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

                                                                      The order to sort results by.

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#AmiSortBy", + "traits": { + "smithy.api#documentation": "

                                                                      The value to sort results by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details that define an aggregation based on Amazon machine images (AMIs).

                                                                      " + } + }, + "com.amazonaws.inspector2#AmiAggregationResponse": { + "type": "structure", + "members": { + "ami": { + "target": "com.amazonaws.inspector2#AmiId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the AMI that findings were aggregated for.

                                                                      ", + "smithy.api#required": {} + } + }, + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID that the AMI belongs to.

                                                                      " + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains the count of matched findings per severity.

                                                                      " + } + }, + "affectedInstances": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The IDs of Amazon EC2 instances using this AMI.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A response that contains the results of a finding aggregation by AMI.

                                                                      " + } + }, + "com.amazonaws.inspector2#AmiId": { + "type": "string", + "traits": { + "smithy.api#pattern": "^ami-([a-z0-9]{8}|[a-z0-9]{17}|\\*)$" + } + }, + "com.amazonaws.inspector2#AmiSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "AFFECTED_INSTANCES", + "name": "AFFECTED_INSTANCES" + } + ] + } + }, + "com.amazonaws.inspector2#Arn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1011 + } + } + }, + "com.amazonaws.inspector2#AssociateMember": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#AssociateMemberRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#AssociateMemberResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Associates an Amazon Web Services account with an Amazon Inspector delegated administrator.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/members/associate", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#AssociateMemberRequest": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the member account to be associated.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#AssociateMemberResponse": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the successfully associated member account.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#AutoEnable": { + "type": "structure", + "members": { + "ec2": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Represents whether Amazon EC2 scans are automatically enabled for new members of your Amazon Inspector\n organization.

                                                                      ", + "smithy.api#required": {} + } + }, + "ecr": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Represents whether Amazon ECR scans are automatically enabled for new members of your Amazon Inspector\n organization.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Represents which scan types are automatically enabled for new members of your Amazon Inspector organization.

                                                                      " + } + }, + "com.amazonaws.inspector2#AwsEc2InstanceDetails": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the Amazon EC2 instance.

                                                                      " + } + }, + "imageId": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The image ID of the Amazon EC2 instance.

                                                                      " + } + }, + "ipV4Addresses": { + "target": "com.amazonaws.inspector2#IpV4AddressList", + "traits": { + "smithy.api#documentation": "

                                                                      The IPv4 addresses of the Amazon EC2 instance.

                                                                      " + } + }, + "ipV6Addresses": { + "target": "com.amazonaws.inspector2#IpV6AddressList", + "traits": { + "smithy.api#documentation": "

                                                                      The IPv6 addresses of the Amazon EC2 instance.

                                                                      " + } + }, + "keyName": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the key pair used to launch the Amazon EC2 instance.

                                                                      " + } + }, + "iamInstanceProfileArn": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The IAM instance profile ARN of the Amazon EC2 instance.

                                                                      " + } + }, + "vpcId": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The VPC ID of the Amazon EC2 instance.

                                                                      " + } + }, + "subnetId": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The subnet ID of the Amazon EC2 instance.

                                                                      " + } + }, + "launchedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time the Amazon EC2 instance was launched at.

                                                                      " + } + }, + "platform": { + "target": "com.amazonaws.inspector2#Platform", + "traits": { + "smithy.api#documentation": "

                                                                      The platform of the Amazon EC2 instance.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon EC2 instance involved in a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#AwsEcrContainerAggregation": { + "type": "structure", + "members": { + "resourceIds": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The container resource IDs.

                                                                      " + } + }, + "imageShas": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The image SHA values.

                                                                      " + } + }, + "repositories": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The container repositories.

                                                                      " + } + }, + "architectures": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The architecture of the containers.

                                                                      " + } + }, + "imageTags": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The image tags.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

                                                                      The sort order (ascending or descending).

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#AwsEcrContainerSortBy", + "traits": { + "smithy.api#documentation": "

                                                                      The value to sort by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An aggregation of information about Amazon ECR containers.

                                                                      " + } + }, + "com.amazonaws.inspector2#AwsEcrContainerAggregationResponse": { + "type": "structure", + "members": { + "resourceId": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The resource ID of the container.

                                                                      ", + "smithy.api#required": {} + } + }, + "imageSha": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The SHA value of the container image.

                                                                      " + } + }, + "repository": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The container repository.

                                                                      " + } + }, + "architecture": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The architecture of the container.

                                                                      " + } + }, + "imageTags": { + "target": "com.amazonaws.inspector2#StringList", + "traits": { + "smithy.api#documentation": "

The container image tags.

                                                                      " + } + }, + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the account that owns the container.

                                                                      " + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

The number of findings by severity.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An aggregation of information about Amazon ECR containers.

                                                                      " + } + }, + "com.amazonaws.inspector2#AwsEcrContainerImageDetails": { + "type": "structure", + "members": { + "repositoryName": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the repository the Amazon ECR container image resides in.

                                                                      ", + "smithy.api#required": {} + } + }, + "imageTags": { + "target": "com.amazonaws.inspector2#ImageTagList", + "traits": { + "smithy.api#documentation": "

                                                                      The image tags attached to the Amazon ECR container image.

                                                                      " + } + }, + "pushedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time the Amazon ECR container image was pushed.

                                                                      " + } + }, + "author": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The image author of the Amazon ECR container image.

                                                                      " + } + }, + "architecture": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The architecture of the Amazon ECR container image.

                                                                      " + } + }, + "imageHash": { + "target": "com.amazonaws.inspector2#ImageHash", + "traits": { + "smithy.api#documentation": "

                                                                      The image hash of the Amazon ECR container image.

                                                                      ", + "smithy.api#required": {} + } + }, + "registry": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The registry the Amazon ECR container image belongs to.

                                                                      ", + "smithy.api#required": {} + } + }, + "platform": { + "target": "com.amazonaws.inspector2#Platform", + "traits": { + "smithy.api#documentation": "

                                                                      The platform of the Amazon ECR container image.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The image details of the Amazon ECR container image.

                                                                      " + } + }, + "com.amazonaws.inspector2#AwsEcrContainerSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + } + ] + } + }, + "com.amazonaws.inspector2#BadRequestException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

One or more tags submitted as part of the request are not valid.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.inspector2#BatchGetAccountStatus": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#BatchGetAccountStatusRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#BatchGetAccountStatusResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves the Amazon Inspector status of multiple Amazon Web Services accounts within your environment.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/status/batch/get", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#BatchGetAccountStatusRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.inspector2#AccountIdSet", + "traits": { + "smithy.api#documentation": "

                                                                      The 12-digit Amazon Web Services account IDs of the accounts to retrieve Amazon Inspector status for.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#BatchGetAccountStatusResponse": { + "type": "structure", + "members": { + "accounts": { + "target": "com.amazonaws.inspector2#AccountStateList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of objects that provide details on the status of Amazon Inspector for each of the requested accounts.

                                                                      ", + "smithy.api#required": {} + } + }, + "failedAccounts": { + "target": "com.amazonaws.inspector2#FailedAccountList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of objects detailing any accounts that failed to enable Amazon Inspector and why.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#BatchGetFreeTrialInfo": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#BatchGetFreeTrialInfoRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#BatchGetFreeTrialInfoResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets free trial status for multiple Amazon Web Services accounts.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/freetrialinfo/batchget", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#BatchGetFreeTrialInfoRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.inspector2#MeteringAccountIdList", + "traits": { + "smithy.api#documentation": "

                                                                      The account IDs to get free trial status for.

                                                                      ", + "smithy.api#length": { + "min": 1, + "max": 100 + }, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#BatchGetFreeTrialInfoResponse": { + "type": "structure", + "members": { + "accounts": { + "target": "com.amazonaws.inspector2#FreeTrialAccountInfoList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of objects that provide Amazon Inspector free trial details for each of the requested accounts.\n

                                                                      ", + "smithy.api#required": {} + } + }, + "failedAccounts": { + "target": "com.amazonaws.inspector2#FreeTrialInfoErrorList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of objects detailing any accounts that free trial data could not be returned for.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#CancelFindingsReport": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#CancelFindingsReportRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#CancelFindingsReportResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Cancels the given findings report.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/reporting/cancel", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#CancelFindingsReportRequest": { + "type": "structure", + "members": { + "reportId": { + "target": "com.amazonaws.inspector2#ReportId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the report to be canceled.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#CancelFindingsReportResponse": { + "type": "structure", + "members": { + "reportId": { + "target": "com.amazonaws.inspector2#ReportId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the canceled report.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#ClientToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.inspector2#Component": { + "type": "string" + }, + "com.amazonaws.inspector2#ComponentType": { + "type": "string" + }, + "com.amazonaws.inspector2#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the conflicting resource.

                                                                      ", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the conflicting resource.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A conflict occurred.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.inspector2#Counts": { + "type": "structure", + "members": { + "count": { + "target": "com.amazonaws.inspector2#AggCounts", + "traits": { + "smithy.api#documentation": "

                                                                      The number of resources.

                                                                      " + } + }, + "groupKey": { + "target": "com.amazonaws.inspector2#GroupKey", + "traits": { + "smithy.api#documentation": "

The key associated with this group.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that contains information on the count of resources within a group.

                                                                      " + } + }, + "com.amazonaws.inspector2#CountsList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Counts" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.inspector2#CoverageFilterCriteria": { + "type": "structure", + "members": { + "scanStatusCode": { + "target": "com.amazonaws.inspector2#CoverageStringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The scan status code to filter on.

                                                                      " + } + }, + "scanStatusReason": { + "target": "com.amazonaws.inspector2#CoverageStringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The scan status reason to filter on.

                                                                      " + } + }, + "accountId": { + "target": "com.amazonaws.inspector2#CoverageStringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of Amazon Web Services account IDs to return coverage statistics for.

                                                                      " + } + }, + "resourceId": { + "target": "com.amazonaws.inspector2#CoverageStringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of Amazon Web Services resource IDs to return coverage statistics for.

                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.inspector2#CoverageStringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of Amazon Web Services resource types to return coverage statistics for.

                                                                      " + } + }, + "scanType": { + "target": "com.amazonaws.inspector2#CoverageStringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of Amazon Inspector scan types to return coverage statistics for.

                                                                      " + } + }, + "ecrRepositoryName": { + "target": "com.amazonaws.inspector2#CoverageStringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon ECR repository name to filter on.

                                                                      " + } + }, + "ecrImageTags": { + "target": "com.amazonaws.inspector2#CoverageStringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon ECR image tags to filter on.

                                                                      " + } + }, + "ec2InstanceTags": { + "target": "com.amazonaws.inspector2#CoverageMapFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon EC2 instance tags to filter on.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that identifies filter criteria for GetCoverageStatistics.
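
As a rough usage sketch for these coverage filters, assuming this release also generates an `@aws-sdk/client-inspector2` package with the usual `<Operation>Command` classes: the command name `GetCoverageStatisticsCommand`, the `groupBy` value `RESOURCE_TYPE`, and the shape of the response are assumptions based on the operation referenced above, not taken from this excerpt.

```ts
import { Inspector2Client, GetCoverageStatisticsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" }); // region is illustrative

export async function ecrRepositoryCoverage(): Promise<void> {
  const response = await client.send(
    new GetCoverageStatisticsCommand({
      filterCriteria: {
        // CoverageStringFilter: a comparison operator (EQUALS | NOT_EQUALS) plus a value.
        resourceType: [{ comparison: "EQUALS", value: "AWS_ECR_REPOSITORY" }],
      },
      groupBy: "RESOURCE_TYPE", // assumed GroupKey value; the enum is defined elsewhere in the model
    })
  );
  // Each Counts entry pairs a groupKey with the number of covered resources in that group.
  console.log(response);
}
```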

                                                                      " + } + }, + "com.amazonaws.inspector2#CoverageMapComparison": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EQUALS", + "name": "EQUALS" + } + ] + } + }, + "com.amazonaws.inspector2#CoverageMapFilter": { + "type": "structure", + "members": { + "comparison": { + "target": "com.amazonaws.inspector2#CoverageMapComparison", + "traits": { + "smithy.api#documentation": "

                                                                      The operator to compare coverage on.

                                                                      ", + "smithy.api#required": {} + } + }, + "key": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The tag key associated with the coverage map filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The tag value associated with the coverage map filter.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details of a coverage map filter.

                                                                      " + } + }, + "com.amazonaws.inspector2#CoverageMapFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#CoverageMapFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#CoverageResourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS_EC2_INSTANCE", + "name": "AWS_EC2_INSTANCE" + }, + { + "value": "AWS_ECR_CONTAINER_IMAGE", + "name": "AWS_ECR_CONTAINER_IMAGE" + }, + { + "value": "AWS_ECR_REPOSITORY", + "name": "AWS_ECR_REPOSITORY" + } + ] + } + }, + "com.amazonaws.inspector2#CoverageStringComparison": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EQUALS", + "name": "EQUALS" + }, + { + "value": "NOT_EQUALS", + "name": "NOT_EQUALS" + } + ] + } + }, + "com.amazonaws.inspector2#CoverageStringFilter": { + "type": "structure", + "members": { + "comparison": { + "target": "com.amazonaws.inspector2#CoverageStringComparison", + "traits": { + "smithy.api#documentation": "

                                                                      The operator to compare strings on.

                                                                      ", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.inspector2#CoverageStringInput", + "traits": { + "smithy.api#documentation": "

                                                                      The value to compare strings on.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details of a coverage string filter.

                                                                      " + } + }, + "com.amazonaws.inspector2#CoverageStringFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#CoverageStringFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#CoverageStringInput": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#CoveredResource": { + "type": "structure", + "members": { + "resourceType": { + "target": "com.amazonaws.inspector2#CoverageResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the covered resource.

                                                                      ", + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "com.amazonaws.inspector2#ResourceId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the covered resource.

                                                                      ", + "smithy.api#required": {} + } + }, + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the covered resource.

                                                                      ", + "smithy.api#required": {} + } + }, + "scanType": { + "target": "com.amazonaws.inspector2#ScanType", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Inspector scan type covering the resource.

                                                                      ", + "smithy.api#required": {} + } + }, + "scanStatus": { + "target": "com.amazonaws.inspector2#ScanStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the scan covering the resource.

                                                                      " + } + }, + "resourceMetadata": { + "target": "com.amazonaws.inspector2#ResourceScanMetadata", + "traits": { + "smithy.api#documentation": "

An object that contains details about the scan metadata for the covered resource.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about a resource covered by Amazon Inspector.

                                                                      " + } + }, + "com.amazonaws.inspector2#CoveredResources": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#CoveredResource" + } + }, + "com.amazonaws.inspector2#CreateFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#CreateFilterRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#CreateFilterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#BadRequestException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a filter resource using specified filter criteria.
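
A minimal CreateFilter sketch, assuming a generated `@aws-sdk/client-inspector2` package following the SDK's codegen conventions; the package name, region, the `LOW` severity value, and the `StringFilter` shape (mirroring the `CoverageStringFilter` above) are assumptions. A `SUPPRESS` action hides findings that match the criteria.

```ts
import { Inspector2Client, CreateFilterCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" }); // region is illustrative

export async function suppressLowSeverityFindings(): Promise<void> {
  const { arn } = await client.send(
    new CreateFilterCommand({
      name: "suppress-low-severity", // 3-64 chars; alphanumeric, dot, underscore, dash
      action: "SUPPRESS", // FilterAction: NONE | SUPPRESS
      description: "Suppress low-severity findings",
      filterCriteria: {
        // Assumed StringFilter shape: comparison operator plus value.
        severity: [{ comparison: "EQUALS", value: "LOW" }],
      },
    })
  );
  console.log("Created filter:", arn);
}
```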

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/filters/create", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#CreateFilterRequest": { + "type": "structure", + "members": { + "action": { + "target": "com.amazonaws.inspector2#FilterAction", + "traits": { + "smithy.api#documentation": "

                                                                      Defines the action that is to be applied to the findings that match the filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.inspector2#FilterDescription", + "traits": { + "smithy.api#documentation": "

                                                                      A description of the filter.

                                                                      " + } + }, + "filterCriteria": { + "target": "com.amazonaws.inspector2#FilterCriteria", + "traits": { + "smithy.api#documentation": "

                                                                      Defines the criteria to be used in the filter for querying findings.

                                                                      ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.inspector2#FilterName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the filter. Minimum length of 3. Maximum length of 64. Valid characters\n include alphanumeric characters, dot (.), underscore (_), and dash (-). Spaces are not\n allowed.

                                                                      ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.inspector2#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      A list of tags for the filter.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#CreateFilterResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.inspector2#FilterArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the successfully created filter.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#CreateFindingsReport": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#CreateFindingsReportRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#CreateFindingsReportResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a finding report.
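
A hedged sketch of starting a findings export, again assuming the generated `@aws-sdk/client-inspector2` package; the `CSV` report format, the bucket name, the KMS key ARN, and the `CRITICAL` severity value are illustrative assumptions, while the `s3Destination` fields follow the `Destination` shape defined later in this model.

```ts
import { Inspector2Client, CreateFindingsReportCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({ region: "us-east-1" });

export async function exportCriticalFindings(): Promise<void> {
  const { reportId } = await client.send(
    new CreateFindingsReportCommand({
      reportFormat: "CSV", // assumed ReportFormat value; the enum is defined elsewhere in the model
      s3Destination: {
        bucketName: "example-inspector-reports", // hypothetical bucket
        keyPrefix: "inspector2/", // optional S3 object key prefix
        kmsKeyArn: "arn:aws:kms:us-east-1:111122223333:key/example", // hypothetical KMS key
      },
      filterCriteria: {
        severity: [{ comparison: "EQUALS", value: "CRITICAL" }], // assumed Severity value
      },
    })
  );
  console.log("Findings report started:", reportId);
}
```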

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/reporting/create", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#CreateFindingsReportRequest": { + "type": "structure", + "members": { + "filterCriteria": { + "target": "com.amazonaws.inspector2#FilterCriteria", + "traits": { + "smithy.api#documentation": "

                                                                      The filter criteria to apply to the results of the finding report.

                                                                      " + } + }, + "reportFormat": { + "target": "com.amazonaws.inspector2#ReportFormat", + "traits": { + "smithy.api#documentation": "

                                                                      The format to generate the report in.

                                                                      ", + "smithy.api#required": {} + } + }, + "s3Destination": { + "target": "com.amazonaws.inspector2#Destination", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon S3 export destination for the report.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#CreateFindingsReportResponse": { + "type": "structure", + "members": { + "reportId": { + "target": "com.amazonaws.inspector2#ReportId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the report.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#Currency": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "USD", + "name": "USD" + } + ] + } + }, + "com.amazonaws.inspector2#CvssScore": { + "type": "structure", + "members": { + "baseScore": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

                                                                      The base CVSS score used for the finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "scoringVector": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The vector string of the CVSS score.

                                                                      ", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The version of CVSS used for the score.

                                                                      ", + "smithy.api#required": {} + } + }, + "source": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The source of the CVSS score.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The CVSS score for a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#CvssScoreAdjustment": { + "type": "structure", + "members": { + "metric": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The metric used to adjust the CVSS score.

                                                                      ", + "smithy.api#required": {} + } + }, + "reason": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The reason the CVSS score has been adjusted.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details on adjustments Amazon Inspector made to the CVSS score for a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#CvssScoreAdjustmentList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#CvssScoreAdjustment" + } + }, + "com.amazonaws.inspector2#CvssScoreDetails": { + "type": "structure", + "members": { + "scoreSource": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The source for the CVSS score.

                                                                      ", + "smithy.api#required": {} + } + }, + "cvssSource": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The source of the CVSS data.

                                                                      " + } + }, + "version": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The CVSS version used in scoring.

                                                                      ", + "smithy.api#required": {} + } + }, + "score": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

                                                                      The CVSS score.

                                                                      ", + "smithy.api#required": {} + } + }, + "scoringVector": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The vector for the CVSS score.

                                                                      ", + "smithy.api#required": {} + } + }, + "adjustments": { + "target": "com.amazonaws.inspector2#CvssScoreAdjustmentList", + "traits": { + "smithy.api#documentation": "

An object that contains details about the adjustments Amazon Inspector made to the CVSS score.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about the CVSS score.

                                                                      " + } + }, + "com.amazonaws.inspector2#CvssScoreList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#CvssScore" + } + }, + "com.amazonaws.inspector2#DateFilter": { + "type": "structure", + "members": { + "startInclusive": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      A timestamp representing the start of the time period filtered on.

                                                                      " + } + }, + "endInclusive": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      A timestamp representing the end of the time period filtered on.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details on the time range used to filter findings.

                                                                      " + } + }, + "com.amazonaws.inspector2#DateFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#DateFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#DateTimeTimestamp": { + "type": "timestamp" + }, + "com.amazonaws.inspector2#DelegatedAdmin": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the Amazon Inspector delegated administrator for your organization.

                                                                      " + } + }, + "relationshipStatus": { + "target": "com.amazonaws.inspector2#RelationshipStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the Amazon Inspector delegated administrator.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon Inspector delegated administrator for your organization.

                                                                      " + } + }, + "com.amazonaws.inspector2#DelegatedAdminAccount": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the Amazon Inspector delegated administrator for your organization.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.inspector2#DelegatedAdminStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the Amazon Inspector delegated administrator.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon Inspector delegated administrator for your organization.

                                                                      " + } + }, + "com.amazonaws.inspector2#DelegatedAdminAccountList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#DelegatedAdminAccount" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 5 + } + } + }, + "com.amazonaws.inspector2#DelegatedAdminStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "DISABLE_IN_PROGRESS", + "name": "DISABLE_IN_PROGRESS" + } + ] + } + }, + "com.amazonaws.inspector2#DeleteFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#DeleteFilterRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#DeleteFilterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a filter resource.
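
A short sketch of deleting a filter by ARN, under the same assumption that the generated `@aws-sdk/client-inspector2` package exposes a `DeleteFilterCommand`.

```ts
import { Inspector2Client, DeleteFilterCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

export async function deleteFilter(filterArn: string): Promise<void> {
  // The response echoes the ARN of the filter that was deleted.
  const { arn } = await client.send(new DeleteFilterCommand({ arn: filterArn }));
  console.log("Deleted filter:", arn);
}
```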

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/filters/delete", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#DeleteFilterRequest": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.inspector2#FilterArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the filter to be deleted.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#DeleteFilterResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.inspector2#FilterArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the filter that has been deleted.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#DescribeOrganizationConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#DescribeOrganizationConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#DescribeOrganizationConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Describes Amazon Inspector configuration settings for an Amazon Web Services organization.
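
The request takes no members, so a call is trivial; the sketch below assumes the generated `@aws-sdk/client-inspector2` package and destructures the `autoEnable` and `maxAccountLimitReached` response members defined just below.

```ts
import {
  Inspector2Client,
  DescribeOrganizationConfigurationCommand,
} from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

export async function showOrganizationConfiguration(): Promise<void> {
  const { autoEnable, maxAccountLimitReached } = await client.send(
    new DescribeOrganizationConfigurationCommand({})
  );
  console.log("Scan types auto-enabled for new members:", autoEnable);
  console.log("Account limit reached:", maxAccountLimitReached);
}
```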

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/organizationconfiguration/describe", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#DescribeOrganizationConfigurationRequest": { + "type": "structure", + "members": {} + }, + "com.amazonaws.inspector2#DescribeOrganizationConfigurationResponse": { + "type": "structure", + "members": { + "autoEnable": { + "target": "com.amazonaws.inspector2#AutoEnable", + "traits": { + "smithy.api#documentation": "

The scan types that are automatically enabled for new members of your organization.

                                                                      " + } + }, + "maxAccountLimitReached": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Represents whether your organization has reached the maximum Amazon Web Services account limit for Amazon Inspector.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#Destination": { + "type": "structure", + "members": { + "bucketName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the Amazon S3 bucket to export findings to.

                                                                      ", + "smithy.api#required": {} + } + }, + "keyPrefix": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

The prefix used when writing findings to the Amazon S3 bucket (the S3 object key prefix).

                                                                      " + } + }, + "kmsKeyArn": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the KMS key used to encrypt data when exporting findings.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details of the Amazon S3 bucket and KMS key used to export findings.

                                                                      " + } + }, + "com.amazonaws.inspector2#Disable": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#DisableRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#DisableResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Disables Amazon Inspector scans for one or more Amazon Web Services accounts. Disabling all scan types in an account\n disables the Amazon Inspector service.
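
A hedged sketch of disabling one scan type for a set of accounts; the `ECR` value for `ResourceScanType` is an assumption (that enum is not shown in this excerpt), while `accounts` and `failedAccounts` come from the `DisableResponse` shape below.

```ts
import { Inspector2Client, DisableCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

export async function disableEcrScanning(accountIds: string[]): Promise<void> {
  const { accounts, failedAccounts } = await client.send(
    new DisableCommand({
      accountIds,
      resourceTypes: ["ECR"], // assumed ResourceScanType value; at most 2 entries are allowed
    })
  );
  console.log("Scans disabled for:", accounts);
  if (failedAccounts?.length) {
    console.warn("Could not disable scans for:", failedAccounts);
  }
}
```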

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/disable", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#DisableDelegatedAdminAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#DisableDelegatedAdminAccountRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#DisableDelegatedAdminAccountResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#ConflictException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Disables the Amazon Inspector delegated administrator for your organization.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/delegatedadminaccounts/disable", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#DisableDelegatedAdminAccountRequest": { + "type": "structure", + "members": { + "delegatedAdminAccountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the current Amazon Inspector delegated administrator.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#DisableDelegatedAdminAccountResponse": { + "type": "structure", + "members": { + "delegatedAdminAccountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the successfully disabled delegated administrator.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#DisableRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.inspector2#AccountIdSet", + "traits": { + "smithy.api#documentation": "

                                                                      An array of account IDs you want to disable Amazon Inspector scans for.

                                                                      " + } + }, + "resourceTypes": { + "target": "com.amazonaws.inspector2#DisableResourceTypeList", + "traits": { + "smithy.api#documentation": "

                                                                      The resource scan types you want to disable.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#DisableResourceTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#ResourceScanType" + }, + "traits": { + "smithy.api#length": { + "max": 2 + } + } + }, + "com.amazonaws.inspector2#DisableResponse": { + "type": "structure", + "members": { + "accounts": { + "target": "com.amazonaws.inspector2#AccountList", + "traits": { + "smithy.api#documentation": "

                                                                      Information on the accounts that have had Amazon Inspector scans successfully disabled. Details are\n provided for each account.

                                                                      ", + "smithy.api#required": {} + } + }, + "failedAccounts": { + "target": "com.amazonaws.inspector2#FailedAccountList", + "traits": { + "smithy.api#documentation": "

                                                                      Information on any accounts for which Amazon Inspector scans could not be disabled. Details are\n provided for each account.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#DisassociateMember": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#DisassociateMemberRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#DisassociateMemberResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Disassociates a member account from an Amazon Inspector delegated administrator.
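
A minimal sketch, assuming a `DisassociateMemberCommand` in the generated `@aws-sdk/client-inspector2` package; both the request and response carry the member account ID per the shapes below.

```ts
import { Inspector2Client, DisassociateMemberCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

export async function disassociateMember(memberAccountId: string): Promise<void> {
  const { accountId } = await client.send(
    new DisassociateMemberCommand({ accountId: memberAccountId })
  );
  console.log("Disassociated member account:", accountId);
}
```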

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/members/disassociate", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#DisassociateMemberRequest": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the member account to disassociate.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#DisassociateMemberResponse": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the successfully disassociated member.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#Ec2InstanceAggregation": { + "type": "structure", + "members": { + "amis": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The AMI IDs associated with the Amazon EC2 instances to aggregate findings for.

                                                                      " + } + }, + "operatingSystems": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

The operating system types to aggregate findings for. Valid values must be uppercase and\n underscore separated; for example, ORACLE_LINUX_7 and\n ALPINE_LINUX_3_8.

                                                                      " + } + }, + "instanceIds": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon EC2 instance IDs to aggregate findings for.

                                                                      " + } + }, + "instanceTags": { + "target": "com.amazonaws.inspector2#MapFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon EC2 instance tags to aggregate findings for.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

                                                                      The order to sort results by.

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#Ec2InstanceSortBy", + "traits": { + "smithy.api#documentation": "

                                                                      The value to sort results by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details that define an aggregation based on Amazon EC2 instances.

                                                                      " + } + }, + "com.amazonaws.inspector2#Ec2InstanceAggregationResponse": { + "type": "structure", + "members": { + "instanceId": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon EC2 instance ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "ami": { + "target": "com.amazonaws.inspector2#AmiId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Machine Image (AMI) of the Amazon EC2 instance.

                                                                      " + } + }, + "operatingSystem": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The operating system of the Amazon EC2 instance.

                                                                      " + } + }, + "instanceTags": { + "target": "com.amazonaws.inspector2#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The tags attached to the instance.

                                                                      " + } + }, + "accountId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account the Amazon EC2 instance belongs to.

                                                                      " + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains the count of matched findings per severity.

                                                                      " + } + }, + "networkFindings": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The number of network findings for the Amazon EC2 instance.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A response that contains the results of a finding aggregation by Amazon EC2 instance.

                                                                      " + } + }, + "com.amazonaws.inspector2#Ec2InstanceSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NETWORK_FINDINGS", + "name": "NETWORK_FINDINGS" + }, + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + } + ] + } + }, + "com.amazonaws.inspector2#Ec2Metadata": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.inspector2#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The tags attached to the instance.

                                                                      " + } + }, + "amiId": { + "target": "com.amazonaws.inspector2#AmiId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the Amazon Machine Image (AMI) used to launch the instance.

                                                                      " + } + }, + "platform": { + "target": "com.amazonaws.inspector2#Ec2Platform", + "traits": { + "smithy.api#documentation": "

                                                                      The platform of the instance.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Metadata details of an Amazon EC2 instance.

                                                                      " + } + }, + "com.amazonaws.inspector2#Ec2Platform": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "WINDOWS", + "name": "WINDOWS" + }, + { + "value": "LINUX", + "name": "LINUX" + }, + { + "value": "UNKNOWN", + "name": "UNKNOWN" + } + ] + } + }, + "com.amazonaws.inspector2#EcrContainerImageMetadata": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.inspector2#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      Tags associated with the Amazon ECR image metadata.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information on the Amazon ECR image metadata associated with a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#EcrRepositoryMetadata": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the Amazon ECR repository.

                                                                      " + } + }, + "scanFrequency": { + "target": "com.amazonaws.inspector2#EcrScanFrequency", + "traits": { + "smithy.api#documentation": "

                                                                      The frequency of scans.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information on the Amazon ECR repository metadata associated with a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#EcrScanFrequency": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "MANUAL", + "name": "MANUAL" + }, + { + "value": "SCAN_ON_PUSH", + "name": "SCAN_ON_PUSH" + }, + { + "value": "CONTINUOUS_SCAN", + "name": "CONTINUOUS_SCAN" + } + ] + } + }, + "com.amazonaws.inspector2#Enable": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#EnableRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#EnableResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Enables Amazon Inspector scans for one or more Amazon Web Services accounts.
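
A hedged sketch of enabling scans, assuming the generated `@aws-sdk/client-inspector2` package; the `EC2` and `ECR` values for `ResourceScanType` are assumptions, while `resourceTypes` (required, 1-2 entries), the optional `accountIds` and `clientToken`, and the `accounts`/`failedAccounts` response members follow the shapes around this operation.

```ts
import { Inspector2Client, EnableCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

export async function enableScanning(accountIds: string[]): Promise<void> {
  const { accounts, failedAccounts } = await client.send(
    new EnableCommand({
      accountIds, // optional per the model
      resourceTypes: ["EC2", "ECR"], // assumed ResourceScanType values
      // clientToken is optional; the model marks it as an idempotency token.
    })
  );
  console.log("Scans enabled for:", accounts);
  if (failedAccounts?.length) {
    console.warn("Could not enable scans for:", failedAccounts);
  }
}
```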

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/enable", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#EnableDelegatedAdminAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#EnableDelegatedAdminAccountRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#EnableDelegatedAdminAccountResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#ConflictException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Enables the Amazon Inspector delegated administrator for your Organizations organization.
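
A short sketch of designating the delegated administrator, under the same package-name assumption; `delegatedAdminAccountId` is required and `clientToken` is optional per the request shape below.

```ts
import {
  Inspector2Client,
  EnableDelegatedAdminAccountCommand,
} from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

export async function delegateInspectorAdministration(adminAccountId: string): Promise<void> {
  const { delegatedAdminAccountId } = await client.send(
    new EnableDelegatedAdminAccountCommand({
      delegatedAdminAccountId: adminAccountId,
      // clientToken is optional; the model marks it as an idempotency token.
    })
  );
  console.log("Delegated administrator enabled:", delegatedAdminAccountId);
}
```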

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/delegatedadminaccounts/enable", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#EnableDelegatedAdminAccountRequest": { + "type": "structure", + "members": { + "delegatedAdminAccountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the Amazon Inspector delegated administrator.

                                                                      ", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.inspector2#ClientToken", + "traits": { + "smithy.api#documentation": "

                                                                      The idempotency token for the request.

                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.inspector2#EnableDelegatedAdminAccountResponse": { + "type": "structure", + "members": { + "delegatedAdminAccountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID of the successfully enabled Amazon Inspector delegated administrator.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#EnableRequest": { + "type": "structure", + "members": { + "accountIds": { + "target": "com.amazonaws.inspector2#AccountIdSet", + "traits": { + "smithy.api#documentation": "

                                                                      A list of account IDs you want to enable Amazon Inspector scans for.

                                                                      " + } + }, + "resourceTypes": { + "target": "com.amazonaws.inspector2#EnableResourceTypeList", + "traits": { + "smithy.api#documentation": "

                                                                      The resource scan types you want to enable.

                                                                      ", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.inspector2#ClientToken", + "traits": { + "smithy.api#documentation": "

                                                                      The idempotency token for the request.

                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.inspector2#EnableResourceTypeList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#ResourceScanType" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2 + } + } + }, + "com.amazonaws.inspector2#EnableResponse": { + "type": "structure", + "members": { + "accounts": { + "target": "com.amazonaws.inspector2#AccountList", + "traits": { + "smithy.api#documentation": "

                                                                      Information on the accounts that have had Amazon Inspector scans successfully enabled. Details are\n provided for each account.

                                                                      ", + "smithy.api#required": {} + } + }, + "failedAccounts": { + "target": "com.amazonaws.inspector2#FailedAccountList", + "traits": { + "smithy.api#documentation": "

                                                                      Information on any accounts for which Amazon Inspector scans could not be enabled. Details are\n provided for each account.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ErrorCode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALREADY_ENABLED", + "name": "ALREADY_ENABLED" + }, + { + "value": "ENABLE_IN_PROGRESS", + "name": "ENABLE_IN_PROGRESS" + }, + { + "value": "DISABLE_IN_PROGRESS", + "name": "DISABLE_IN_PROGRESS" + }, + { + "value": "SUSPEND_IN_PROGRESS", + "name": "SUSPEND_IN_PROGRESS" + }, + { + "value": "RESOURCE_NOT_FOUND", + "name": "RESOURCE_NOT_FOUND" + }, + { + "value": "ACCESS_DENIED", + "name": "ACCESS_DENIED" + }, + { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + }, + { + "value": "SSM_UNAVAILABLE", + "name": "SSM_UNAVAILABLE" + }, + { + "value": "SSM_THROTTLED", + "name": "SSM_THROTTLED" + }, + { + "value": "EVENTBRIDGE_UNAVAILABLE", + "name": "EVENTBRIDGE_UNAVAILABLE" + }, + { + "value": "EVENTBRIDGE_THROTTLED", + "name": "EVENTBRIDGE_THROTTLED" + }, + { + "value": "RESOURCE_SCAN_NOT_DISABLED", + "name": "RESOURCE_SCAN_NOT_DISABLED" + }, + { + "value": "DISASSOCIATE_ALL_MEMBERS", + "name": "DISASSOCIATE_ALL_MEMBERS" + } + ] + } + }, + "com.amazonaws.inspector2#ErrorMessage": { + "type": "string" + }, + "com.amazonaws.inspector2#ExternalReportStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SUCCEEDED", + "name": "SUCCEEDED" + }, + { + "value": "IN_PROGRESS", + "name": "IN_PROGRESS" + }, + { + "value": "CANCELLED", + "name": "CANCELLED" + }, + { + "value": "FAILED", + "name": "FAILED" + } + ] + } + }, + "com.amazonaws.inspector2#FailedAccount": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.inspector2#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The status of Amazon Inspector for the account.

                                                                      " + } + }, + "resourceStatus": { + "target": "com.amazonaws.inspector2#ResourceStatus", + "traits": { + "smithy.api#documentation": "

                                                                      An object detailing which resources Amazon Inspector is enabled to scan for the account.

                                                                      " + } + }, + "errorCode": { + "target": "com.amazonaws.inspector2#ErrorCode", + "traits": { + "smithy.api#documentation": "

                                                                      The error code explaining why the account failed to enable Amazon Inspector.

                                                                      ", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The error message received when the account failed to enable Amazon Inspector.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object with details on why an account failed to enable Amazon Inspector.

                                                                      " + } + }, + "com.amazonaws.inspector2#FailedAccountList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#FailedAccount" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.inspector2#FilePath": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#Filter": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.inspector2#FilterArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) associated with this filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "ownerId": { + "target": "com.amazonaws.inspector2#OwnerId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the account that created the filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "name": { + "target": "com.amazonaws.inspector2#FilterName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "criteria": { + "target": "com.amazonaws.inspector2#FilterCriteria", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the filter criteria associated with this filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "action": { + "target": "com.amazonaws.inspector2#FilterAction", + "traits": { + "smithy.api#documentation": "

                                                                      The action that is to be applied to the findings that match the filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "createdAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time this filter was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the filter was last updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.inspector2#FilterDescription", + "traits": { + "smithy.api#documentation": "

                                                                      A description of the filter.

                                                                      " + } + }, + "reason": { + "target": "com.amazonaws.inspector2#FilterReason", + "traits": { + "smithy.api#documentation": "

                                                                      The reason for the filter.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.inspector2#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The tags attached to the filter.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details about a filter.

                                                                      " + } + }, + "com.amazonaws.inspector2#FilterAction": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NONE", + "name": "NONE" + }, + { + "value": "SUPPRESS", + "name": "SUPPRESS" + } + ] + } + }, + "com.amazonaws.inspector2#FilterArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.inspector2#FilterArnList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#FilterArn" + } + }, + "com.amazonaws.inspector2#FilterCriteria": { + "type": "structure", + "members": { + "findingArn": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the finding ARNs used to filter findings.

                                                                      " + } + }, + "awsAccountId": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon Web Services account IDs used to filter findings.

                                                                      " + } + }, + "findingType": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the finding types used to filter findings.

                                                                      " + } + }, + "severity": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the severity used to filter findings.

                                                                      " + } + }, + "firstObservedAt": { + "target": "com.amazonaws.inspector2#DateFilterList", + "traits": { + "smithy.api#documentation": "

Details on the date and time a finding was first seen, used to filter findings.

                                                                      " + } + }, + "lastObservedAt": { + "target": "com.amazonaws.inspector2#DateFilterList", + "traits": { + "smithy.api#documentation": "

Details on the date and time a finding was last seen, used to filter findings.

                                                                      " + } + }, + "updatedAt": { + "target": "com.amazonaws.inspector2#DateFilterList", + "traits": { + "smithy.api#documentation": "

Details on the date and time a finding was last updated, used to filter findings.

                                                                      " + } + }, + "findingStatus": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the finding status types used to filter findings.

                                                                      " + } + }, + "title": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the finding title used to filter findings.

                                                                      " + } + }, + "inspectorScore": { + "target": "com.amazonaws.inspector2#NumberFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Inspector score to filter on.

                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the resource types used to filter findings.

                                                                      " + } + }, + "resourceId": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the resource IDs used to filter findings.

                                                                      " + } + }, + "resourceTags": { + "target": "com.amazonaws.inspector2#MapFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the resource tags used to filter findings.

                                                                      " + } + }, + "ec2InstanceImageId": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon EC2 instance image IDs used to filter findings.

                                                                      " + } + }, + "ec2InstanceVpcId": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon EC2 instance VPC IDs used to filter findings.

                                                                      " + } + }, + "ec2InstanceSubnetId": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon EC2 instance subnet IDs used to filter findings.

                                                                      " + } + }, + "ecrImagePushedAt": { + "target": "com.amazonaws.inspector2#DateFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the Amazon ECR image push date and time used to filter findings.

                                                                      " + } + }, + "ecrImageArchitecture": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon ECR image architecture types used to filter findings.

                                                                      " + } + }, + "ecrImageRegistry": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the Amazon ECR registry used to filter findings.

                                                                      " + } + }, + "ecrImageRepositoryName": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the name of the Amazon ECR repository used to filter findings.

                                                                      " + } + }, + "ecrImageTags": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The tags attached to the Amazon ECR container image.

                                                                      " + } + }, + "ecrImageHash": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the Amazon ECR image hashes used to filter findings.

                                                                      " + } + }, + "portRange": { + "target": "com.amazonaws.inspector2#PortRangeFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the port ranges used to filter findings.

                                                                      " + } + }, + "networkProtocol": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

Details on the network protocols used to filter findings.

                                                                      " + } + }, + "componentId": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the component IDs used to filter findings.

                                                                      " + } + }, + "componentType": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the component types used to filter findings.

                                                                      " + } + }, + "vulnerabilityId": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the vulnerability ID used to filter findings.

                                                                      " + } + }, + "vulnerabilitySource": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the vulnerability type used to filter findings.

                                                                      " + } + }, + "vendorSeverity": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the vendor severity used to filter findings.

                                                                      " + } + }, + "vulnerablePackages": { + "target": "com.amazonaws.inspector2#PackageFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the vulnerable packages used to filter findings.

                                                                      " + } + }, + "relatedVulnerabilities": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      Details on the related vulnerabilities used to filter findings.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details on the criteria used to define the filter.

                                                                      " + } + }, + "com.amazonaws.inspector2#FilterDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.inspector2#FilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Filter" + } + }, + "com.amazonaws.inspector2#FilterName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.inspector2#FilterReason": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.inspector2#Finding": { + "type": "structure", + "members": { + "findingArn": { + "target": "com.amazonaws.inspector2#FindingArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "awsAccountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID associated with the finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "type": { + "target": "com.amazonaws.inspector2#FindingType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.inspector2#FindingDescription", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "title": { + "target": "com.amazonaws.inspector2#FindingTitle", + "traits": { + "smithy.api#documentation": "

                                                                      The title of the finding.

                                                                      " + } + }, + "remediation": { + "target": "com.amazonaws.inspector2#Remediation", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains the details about how to remediate a finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "severity": { + "target": "com.amazonaws.inspector2#Severity", + "traits": { + "smithy.api#documentation": "

                                                                      The severity of the finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "firstObservedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the finding was first observed.

                                                                      ", + "smithy.api#required": {} + } + }, + "lastObservedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the finding was last observed.

                                                                      ", + "smithy.api#required": {} + } + }, + "updatedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the finding was last updated.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.inspector2#FindingStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "resources": { + "target": "com.amazonaws.inspector2#ResourceList", + "traits": { + "smithy.api#documentation": "

                                                                      Contains information on the resources involved in a finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "inspectorScore": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Inspector score given to the finding.

                                                                      " + } + }, + "inspectorScoreDetails": { + "target": "com.amazonaws.inspector2#InspectorScoreDetails", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details of the Amazon Inspector score.

                                                                      " + } + }, + "networkReachabilityDetails": { + "target": "com.amazonaws.inspector2#NetworkReachabilityDetails", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains the details of a network reachability finding.

                                                                      " + } + }, + "packageVulnerabilityDetails": { + "target": "com.amazonaws.inspector2#PackageVulnerabilityDetails", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains the details of a package vulnerability finding.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details about an Amazon Inspector finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#FindingArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#FindingDescription": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#FindingList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Finding" + }, + "traits": { + "smithy.api#length": { + "max": 25 + } + } + }, + "com.amazonaws.inspector2#FindingStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "SUPPRESSED", + "name": "SUPPRESSED" + }, + { + "value": "CLOSED", + "name": "CLOSED" + } + ] + } + }, + "com.amazonaws.inspector2#FindingTitle": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#FindingType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NETWORK_REACHABILITY", + "name": "NETWORK_REACHABILITY" + }, + { + "value": "PACKAGE_VULNERABILITY", + "name": "PACKAGE_VULNERABILITY" + } + ] + } + }, + "com.amazonaws.inspector2#FindingTypeAggregation": { + "type": "structure", + "members": { + "findingType": { + "target": "com.amazonaws.inspector2#AggregationFindingType", + "traits": { + "smithy.api#documentation": "

                                                                      The finding type to aggregate.

                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.inspector2#AggregationResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The resource type to aggregate.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

                                                                      The order to sort results by.

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#FindingTypeSortBy", + "traits": { + "smithy.api#documentation": "

                                                                      The value to sort results by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details that define an aggregation based on finding type.

                                                                      " + } + }, + "com.amazonaws.inspector2#FindingTypeAggregationResponse": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account associated with the findings.

                                                                      " + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

An object that represents the count of matched findings per severity.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A response that contains the results of a finding type aggregation.

                                                                      " + } + }, + "com.amazonaws.inspector2#FindingTypeSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + } + ] + } + }, + "com.amazonaws.inspector2#FreeTrialAccountInfo": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#MeteringAccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The account associated with the Amazon Inspector free trial information.

                                                                      ", + "smithy.api#required": {} + } + }, + "freeTrialInfo": { + "target": "com.amazonaws.inspector2#FreeTrialInfoList", + "traits": { + "smithy.api#documentation": "

                                                                      Contains information about the Amazon Inspector free trial for an account.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about the Amazon Inspector free trial for an account.

                                                                      " + } + }, + "com.amazonaws.inspector2#FreeTrialAccountInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#FreeTrialAccountInfo" + } + }, + "com.amazonaws.inspector2#FreeTrialInfo": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.inspector2#FreeTrialType", + "traits": { + "smithy.api#documentation": "

The type of scan covered by the Amazon Inspector free trial.

                                                                      ", + "smithy.api#required": {} + } + }, + "start": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the Amazon Inspector free trial started for a given account.

                                                                      ", + "smithy.api#required": {} + } + }, + "end": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time that the Amazon Inspector free trial ends for a given account.

                                                                      ", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.inspector2#FreeTrialStatus", + "traits": { + "smithy.api#documentation": "

The status of the Amazon Inspector free trial for the account.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains information about the Amazon Inspector free trial for an account.

                                                                      " + } + }, + "com.amazonaws.inspector2#FreeTrialInfoError": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#MeteringAccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The account associated with the Amazon Inspector free trial information.

                                                                      ", + "smithy.api#required": {} + } + }, + "code": { + "target": "com.amazonaws.inspector2#FreeTrialInfoErrorCode", + "traits": { + "smithy.api#documentation": "

                                                                      The error code.

                                                                      ", + "smithy.api#required": {} + } + }, + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The error message returned.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about an error received while accessing free trial data for an account.

                                                                      " + } + }, + "com.amazonaws.inspector2#FreeTrialInfoErrorCode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ACCESS_DENIED", + "name": "ACCESS_DENIED" + }, + { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + } + ] + } + }, + "com.amazonaws.inspector2#FreeTrialInfoErrorList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#FreeTrialInfoError" + } + }, + "com.amazonaws.inspector2#FreeTrialInfoList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#FreeTrialInfo" + } + }, + "com.amazonaws.inspector2#FreeTrialStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "INACTIVE", + "name": "INACTIVE" + } + ] + } + }, + "com.amazonaws.inspector2#FreeTrialType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EC2", + "name": "EC2" + }, + { + "value": "ECR", + "name": "ECR" + } + ] + } + }, + "com.amazonaws.inspector2#GetDelegatedAdminAccount": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#GetDelegatedAdminAccountRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#GetDelegatedAdminAccountResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves information about the Amazon Inspector delegated administrator for your\n organization.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/delegatedadminaccounts/get", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#GetDelegatedAdminAccountRequest": { + "type": "structure", + "members": {} + }, + "com.amazonaws.inspector2#GetDelegatedAdminAccountResponse": { + "type": "structure", + "members": { + "delegatedAdmin": { + "target": "com.amazonaws.inspector2#DelegatedAdmin", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the Amazon Inspector delegated administrator.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#GetFindingsReportStatus": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#GetFindingsReportStatusRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#GetFindingsReportStatusResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the status of a findings report.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/reporting/status/get", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#GetFindingsReportStatusRequest": { + "type": "structure", + "members": { + "reportId": { + "target": "com.amazonaws.inspector2#ReportId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the report to retrieve the status of.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#GetFindingsReportStatusResponse": { + "type": "structure", + "members": { + "reportId": { + "target": "com.amazonaws.inspector2#ReportId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the report.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.inspector2#ExternalReportStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the report.

                                                                      " + } + }, + "errorCode": { + "target": "com.amazonaws.inspector2#ReportingErrorCode", + "traits": { + "smithy.api#documentation": "

                                                                      The error code of the report.

                                                                      " + } + }, + "errorMessage": { + "target": "com.amazonaws.inspector2#ErrorMessage", + "traits": { + "smithy.api#documentation": "

                                                                      The error message of the report.

                                                                      " + } + }, + "destination": { + "target": "com.amazonaws.inspector2#Destination", + "traits": { + "smithy.api#documentation": "

                                                                      The destination of the report.

                                                                      " + } + }, + "filterCriteria": { + "target": "com.amazonaws.inspector2#FilterCriteria", + "traits": { + "smithy.api#documentation": "

                                                                      The filter criteria associated with the report.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#GetMember": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#GetMemberRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#GetMemberResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets member information for your organization.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/members/get", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#GetMemberRequest": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Web Services account ID of the member account to retrieve information on.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#GetMemberResponse": { + "type": "structure", + "members": { + "member": { + "target": "com.amazonaws.inspector2#Member", + "traits": { + "smithy.api#documentation": "

                                                                      Details of the retrieved member account.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#GroupKey": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SCAN_STATUS_CODE", + "name": "SCAN_STATUS_CODE" + }, + { + "value": "SCAN_STATUS_REASON", + "name": "SCAN_STATUS_REASON" + }, + { + "value": "ACCOUNT_ID", + "name": "ACCOUNT_ID" + }, + { + "value": "RESOURCE_TYPE", + "name": "RESOURCE_TYPE" + }, + { + "value": "ECR_REPOSITORY_NAME", + "name": "ECR_REPOSITORY_NAME" + } + ] + } + }, + "com.amazonaws.inspector2#ImageHash": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 71, + "max": 71 + }, + "smithy.api#pattern": "^sha256:[a-z0-9]{64}$" + } + }, + "com.amazonaws.inspector2#ImageLayerAggregation": { + "type": "structure", + "members": { + "repositories": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The repository associated with the container image hosting the layers.

                                                                      " + } + }, + "resourceIds": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

The resource IDs of the container images hosting the layers.

                                                                      " + } + }, + "layerHashes": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The hashes associated with the layers.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

                                                                      The order to sort results by.

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#ImageLayerSortBy", + "traits": { + "smithy.api#documentation": "

                                                                      The value to sort results by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details that define an aggregation based on container image layers.

                                                                      " + } + }, + "com.amazonaws.inspector2#ImageLayerAggregationResponse": { + "type": "structure", + "members": { + "repository": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The repository the layer resides in.

                                                                      ", + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The resource ID of the container image layer.

                                                                      ", + "smithy.api#required": {} + } + }, + "layerHash": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The layer hash.

                                                                      ", + "smithy.api#required": {} + } + }, + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account that owns the container image hosting the layer image.

                                                                      ", + "smithy.api#required": {} + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

                                                                      An object that represents the count of matched findings per severity.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A response that contains the results of a finding aggregation by image layer.

                                                                      " + } + }, + "com.amazonaws.inspector2#ImageLayerSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + } + ] + } + }, + "com.amazonaws.inspector2#ImageTagList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#NonEmptyString" + } + }, + "com.amazonaws.inspector2#Inspector2": { + "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Inspector2", + "arnNamespace": "inspector2", + "cloudFormationName": "Inspector2", + "cloudTrailEventSource": "inspector2.amazon.aws", + "endpointPrefix": "inspector2" + }, + "aws.auth#sigv4": { + "name": "inspector2" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": ["*"] + }, + "smithy.api#documentation": "

                                                                      Amazon Inspector is a vulnerability discovery service that automates continuous scanning for\n security vulnerabilities within your Amazon EC2 and Amazon ECR environments.

                                                                      ", + "smithy.api#title": "Inspector2" + }, + "version": "2020-06-08", + "operations": [ + { + "target": "com.amazonaws.inspector2#AssociateMember" + }, + { + "target": "com.amazonaws.inspector2#BatchGetAccountStatus" + }, + { + "target": "com.amazonaws.inspector2#BatchGetFreeTrialInfo" + }, + { + "target": "com.amazonaws.inspector2#CancelFindingsReport" + }, + { + "target": "com.amazonaws.inspector2#CreateFilter" + }, + { + "target": "com.amazonaws.inspector2#CreateFindingsReport" + }, + { + "target": "com.amazonaws.inspector2#DeleteFilter" + }, + { + "target": "com.amazonaws.inspector2#DescribeOrganizationConfiguration" + }, + { + "target": "com.amazonaws.inspector2#Disable" + }, + { + "target": "com.amazonaws.inspector2#DisableDelegatedAdminAccount" + }, + { + "target": "com.amazonaws.inspector2#DisassociateMember" + }, + { + "target": "com.amazonaws.inspector2#Enable" + }, + { + "target": "com.amazonaws.inspector2#EnableDelegatedAdminAccount" + }, + { + "target": "com.amazonaws.inspector2#GetDelegatedAdminAccount" + }, + { + "target": "com.amazonaws.inspector2#GetFindingsReportStatus" + }, + { + "target": "com.amazonaws.inspector2#GetMember" + }, + { + "target": "com.amazonaws.inspector2#ListAccountPermissions" + }, + { + "target": "com.amazonaws.inspector2#ListCoverage" + }, + { + "target": "com.amazonaws.inspector2#ListCoverageStatistics" + }, + { + "target": "com.amazonaws.inspector2#ListDelegatedAdminAccounts" + }, + { + "target": "com.amazonaws.inspector2#ListFilters" + }, + { + "target": "com.amazonaws.inspector2#ListFindingAggregations" + }, + { + "target": "com.amazonaws.inspector2#ListFindings" + }, + { + "target": "com.amazonaws.inspector2#ListMembers" + }, + { + "target": "com.amazonaws.inspector2#ListTagsForResource" + }, + { + "target": "com.amazonaws.inspector2#ListUsageTotals" + }, + { + "target": "com.amazonaws.inspector2#TagResource" + }, + { + "target": "com.amazonaws.inspector2#UntagResource" + }, + { + "target": "com.amazonaws.inspector2#UpdateFilter" + }, + { + "target": "com.amazonaws.inspector2#UpdateOrganizationConfiguration" + } + ] + }, + "com.amazonaws.inspector2#InspectorScoreDetails": { + "type": "structure", + "members": { + "adjustedCvss": { + "target": "com.amazonaws.inspector2#CvssScoreDetails", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about the CVSS score given to a finding.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about the Amazon Inspector score given to a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

                                                                      The number of seconds to wait before retrying the request.

                                                                      ", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request has failed due to an internal failure of the Amazon Inspector service.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.inspector2#IpV4Address": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 7, + "max": 15 + }, + "smithy.api#pattern": "^(([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9]{2}|2[0-4][0-9]|25[0-5])$" + } + }, + "com.amazonaws.inspector2#IpV4AddressList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#IpV4Address" + } + }, + "com.amazonaws.inspector2#IpV6Address": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 47 + } + } + }, + "com.amazonaws.inspector2#IpV6AddressList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#IpV6Address" + } + }, + "com.amazonaws.inspector2#ListAccountPermissions": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListAccountPermissionsRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListAccountPermissionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Lists the permissions an account has to configure Amazon Inspector.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/accountpermissions/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "permissions", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.inspector2#ListAccountPermissionsMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#ListAccountPermissionsRequest": { + "type": "structure", + "members": { + "service": { + "target": "com.amazonaws.inspector2#Service", + "traits": { + "smithy.api#documentation": "

                                                                      The service scan type to check permissions for.

                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.inspector2#ListAccountPermissionsMaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return in the response.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListAccountPermissionsResponse": { + "type": "structure", + "members": { + "permissions": { + "target": "com.amazonaws.inspector2#Permissions", + "traits": { + "smithy.api#documentation": "

                                                                      Contains details on the permissions an account has to configure Amazon Inspector.

                                                                      ", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListCoverage": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListCoverageRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListCoverageResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists coverage details for your environment.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/coverage/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "coveredResources", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.inspector2#ListCoverageMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 200 + } + } + }, + "com.amazonaws.inspector2#ListCoverageRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.inspector2#ListCoverageMaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return in the response.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

                                                                      " + } + }, + "filterCriteria": { + "target": "com.amazonaws.inspector2#CoverageFilterCriteria", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details on the filters to apply to the coverage data for your\n environment.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListCoverageResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

                                                                      " + } + }, + "coveredResources": { + "target": "com.amazonaws.inspector2#CoveredResources", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details on the covered resources in your environment.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListCoverageStatistics": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListCoverageStatisticsRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListCoverageStatisticsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Lists Amazon Inspector coverage statistics for your environment.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/coverage/statistics/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "countsByGroup" + } + } + }, + "com.amazonaws.inspector2#ListCoverageStatisticsRequest": { + "type": "structure", + "members": { + "filterCriteria": { + "target": "com.amazonaws.inspector2#CoverageFilterCriteria", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details on the filters to apply to the coverage data for your\n environment.

                                                                      " + } + }, + "groupBy": { + "target": "com.amazonaws.inspector2#GroupKey", + "traits": { + "smithy.api#documentation": "

                                                                      The value to group the results by.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListCoverageStatisticsResponse": { + "type": "structure", + "members": { + "countsByGroup": { + "target": "com.amazonaws.inspector2#CountsList", + "traits": { + "smithy.api#documentation": "

                                                                      An array with the number for each group.

                                                                      " + } + }, + "totalCounts": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The total number for all groups.

                                                                      ", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      A token to use for paginating results that are returned in the response. Set the value\n of this parameter to null for the first request to a list action. For subsequent calls, use\n the NextToken value returned from the previous request to continue listing\n results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListDelegatedAdminAccounts": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListDelegatedAdminAccountsRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListDelegatedAdminAccountsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Lists information about the Amazon Inspector delegated administrator of your\n organization.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/delegatedadminaccounts/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "delegatedAdminAccounts", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.inspector2#ListDelegatedAdminAccountsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.inspector2#ListDelegatedAdminMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListDelegatedAdminAccountsResponse": { + "type": "structure", + "members": { + "delegatedAdminAccounts": { + "target": "com.amazonaws.inspector2#DelegatedAdminAccountList", + "traits": { + "smithy.api#documentation": "

Details of the Amazon Inspector delegated administrator of your organization.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListDelegatedAdminMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.inspector2#ListFilterMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.inspector2#ListFilters": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListFiltersRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListFiltersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the filters associated with your account.
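A usage sketch, assuming the generated ListFiltersCommand and that "SUPPRESS" is a valid FilterAction value (the enum itself is defined elsewhere in this model):

import { Inspector2Client, ListFiltersCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// List only suppression rules ("SUPPRESS" is assumed; "NONE" would list the rest).
async function suppressionRules() {
  const { filters } = await client.send(
    new ListFiltersCommand({ action: "SUPPRESS", maxResults: 100 })
  );
  // name and arn are Filter members defined elsewhere in this model.
  return (filters ?? []).map((f) => ({ name: f.name, arn: f.arn }));
}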

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/filters/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "filters", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.inspector2#ListFiltersRequest": { + "type": "structure", + "members": { + "arns": { + "target": "com.amazonaws.inspector2#FilterArnList", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the filter.

                                                                      " + } + }, + "action": { + "target": "com.amazonaws.inspector2#FilterAction", + "traits": { + "smithy.api#documentation": "

The action the filter applies to matched findings.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.inspector2#ListFilterMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListFiltersResponse": { + "type": "structure", + "members": { + "filters": { + "target": "com.amazonaws.inspector2#FilterList", + "traits": { + "smithy.api#documentation": "

Contains details on the filters associated with your account.

                                                                      ", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListFindingAggregations": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListFindingAggregationsRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListFindingAggregationsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists aggregated finding data for your environment based on specific criteria.
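A sketch of requesting an account-level aggregation, assuming the generated ListFindingAggregationsCommand, that "ACCOUNT" is one of the AggregationType values, and that StringFilter supports an "EQUALS" comparison (both defined elsewhere in this model):

import { Inspector2Client, ListFindingAggregationsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// Aggregate findings for one account; accountIds is the StringFilterList
// member documented below.
async function accountAggregation(accountId: string) {
  const { responses } = await client.send(
    new ListFindingAggregationsCommand({
      aggregationType: "ACCOUNT",
      accountIds: [{ comparison: "EQUALS", value: accountId }],
    })
  );
  return responses ?? [];
}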

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/findings/aggregation/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "responses", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.inspector2#ListFindingAggregationsMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.inspector2#ListFindingAggregationsRequest": { + "type": "structure", + "members": { + "aggregationType": { + "target": "com.amazonaws.inspector2#AggregationType", + "traits": { + "smithy.api#documentation": "

The type of the aggregation request.

                                                                      ", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.inspector2#ListFindingAggregationsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response.

                                                                      " + } + }, + "accountIds": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account IDs to retrieve finding aggregation data for.

                                                                      " + } + }, + "aggregationRequest": { + "target": "com.amazonaws.inspector2#AggregationRequest", + "traits": { + "smithy.api#documentation": "

Details of the aggregation request that is used to filter your aggregation results.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListFindingAggregationsResponse": { + "type": "structure", + "members": { + "aggregationType": { + "target": "com.amazonaws.inspector2#AggregationType", + "traits": { + "smithy.api#documentation": "

The type of aggregation to perform.

                                                                      ", + "smithy.api#required": {} + } + }, + "responses": { + "target": "com.amazonaws.inspector2#AggregationResponseList", + "traits": { + "smithy.api#documentation": "

Objects that contain the results of an aggregation operation.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListFindings": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListFindingsRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListFindingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists findings for your environment.
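A sketch that pages through critical findings, assuming the generated paginateListFindings helper and the FilterCriteria member name ("severity"), sort field ("FIRST_OBSERVED_AT"), and severity value ("CRITICAL") noted in the comments; those are defined elsewhere in this model, not in the excerpt above.

import { Inspector2Client, paginateListFindings } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// Page through CRITICAL findings, newest first.
async function logCriticalFindings() {
  const pages = paginateListFindings(
    { client, pageSize: 100 },
    {
      filterCriteria: { severity: [{ comparison: "EQUALS", value: "CRITICAL" }] },
      sortCriteria: { field: "FIRST_OBSERVED_AT", sortOrder: "DESC" },
    }
  );
  for await (const page of pages) {
    for (const finding of page.findings ?? []) {
      console.log(finding.severity, finding.title);
    }
  }
}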

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/findings/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "findings", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.inspector2#ListFindingsMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.inspector2#ListFindingsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.inspector2#ListFindingsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + }, + "filterCriteria": { + "target": "com.amazonaws.inspector2#FilterCriteria", + "traits": { + "smithy.api#documentation": "

Details on the filters to apply to your finding results.

                                                                      " + } + }, + "sortCriteria": { + "target": "com.amazonaws.inspector2#SortCriteria", + "traits": { + "smithy.api#documentation": "

Details on the sort criteria to apply to your finding results.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListFindingsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + }, + "findings": { + "target": "com.amazonaws.inspector2#FindingList", + "traits": { + "smithy.api#documentation": "

Contains details on the findings in your environment.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListMembers": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListMembersRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListMembersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists members associated with the Amazon Inspector delegated administrator for your organization.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/members/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "members", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.inspector2#ListMembersMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 50 + } + } + }, + "com.amazonaws.inspector2#ListMembersRequest": { + "type": "structure", + "members": { + "onlyAssociated": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Specifies whether to list only currently associated members if True or to list all members within the organization if False.
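A usage sketch for this flag, assuming the generated ListMembersCommand:

import { Inspector2Client, ListMembersCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// onlyAssociated: true restricts the list to members currently associated with
// the delegated administrator; maxResults may be at most 50 (see ListMembersMaxResults below).
async function associatedMembers() {
  const { members } = await client.send(
    new ListMembersCommand({ onlyAssociated: true, maxResults: 50 })
  );
  return members ?? [];
}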

                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.inspector2#ListMembersMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListMembersResponse": { + "type": "structure", + "members": { + "members": { + "target": "com.amazonaws.inspector2#MemberList", + "traits": { + "smithy.api#documentation": "

An object that contains details for each member account.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#NextToken", + "traits": { + "smithy.api#documentation": "

The pagination parameter to be used on the next list operation to retrieve more items.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all tags attached to a given resource.
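A usage sketch, assuming the generated ListTagsForResourceCommand:

import { Inspector2Client, ListTagsForResourceCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// resourceArn is the only input member and is bound to the {resourceArn}
// label of GET /tags/{resourceArn} (see the http trait below).
async function tagsFor(resourceArn: string) {
  const { tags } = await client.send(new ListTagsForResourceCommand({ resourceArn }));
  return tags ?? {};
}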

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{resourceArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.inspector2#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.inspector2#Arn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the resource to list tags for.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.inspector2#TagMap", + "traits": { + "smithy.api#documentation": "

The tags associated with the resource.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListUsageTotals": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#ListUsageTotalsRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#ListUsageTotalsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the Amazon Inspector usage totals over the last 30 days.
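A usage sketch, assuming the generated ListUsageTotalsCommand; the account IDs are placeholders:

import { Inspector2Client, ListUsageTotalsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// Usage totals for two specific member accounts (placeholder IDs);
// omit accountIds to get totals for the calling account.
async function usageTotals() {
  const { totals } = await client.send(
    new ListUsageTotalsCommand({ accountIds: ["111122223333", "444455556666"] })
  );
  return totals ?? [];
}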

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/usage/list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "items": "totals", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.inspector2#ListUsageTotalsMaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 500 + } + } + }, + "com.amazonaws.inspector2#ListUsageTotalsNextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.inspector2#ListUsageTotalsRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.inspector2#ListUsageTotalsMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return in the response.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.inspector2#ListUsageTotalsNextToken", + "traits": { + "smithy.api#documentation": "

A token to use for paginating results that are returned in the response. Set the value of this parameter to null for the first request to a list action. For subsequent calls, use the NextToken value returned from the previous request to continue listing results after the first page.

                                                                      " + } + }, + "accountIds": { + "target": "com.amazonaws.inspector2#UsageAccountIdList", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account IDs to retrieve usage totals for.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#ListUsageTotalsResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.inspector2#ListUsageTotalsNextToken", + "traits": { + "smithy.api#documentation": "

The pagination parameter to be used on the next list operation to retrieve more items.

                                                                      " + } + }, + "totals": { + "target": "com.amazonaws.inspector2#UsageTotalList", + "traits": { + "smithy.api#documentation": "

An object with details on the total usage for the requested account.

                                                                      " + } + } + } + }, + "com.amazonaws.inspector2#MapComparison": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EQUALS", + "name": "EQUALS" + } + ] + } + }, + "com.amazonaws.inspector2#MapFilter": { + "type": "structure", + "members": { + "comparison": { + "target": "com.amazonaws.inspector2#MapComparison", + "traits": { + "smithy.api#documentation": "

The operator to use when comparing values in the filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "key": { + "target": "com.amazonaws.inspector2#MapKey", + "traits": { + "smithy.api#documentation": "

The tag key used in the filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.inspector2#MapValue", + "traits": { + "smithy.api#documentation": "

The tag value used in the filter.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that describes details of a map filter.
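A sketch of using a MapFilter to match findings by resource tag, assuming the generated ListFindingsCommand and that FilterCriteria exposes a "resourceTags" MapFilterList member (FilterCriteria is defined elsewhere in this model):

import { Inspector2Client, ListFindingsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// MapFilter only supports the EQUALS comparison (see MapComparison above).
// "resourceTags" as the FilterCriteria member name is an assumption.
async function findingsForTeam(team: string) {
  const { findings } = await client.send(
    new ListFindingsCommand({
      filterCriteria: {
        resourceTags: [{ comparison: "EQUALS", key: "team", value: team }],
      },
    })
  );
  return findings ?? [];
}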

                                                                      " + } + }, + "com.amazonaws.inspector2#MapFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#MapFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#MapKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + } + } + }, + "com.amazonaws.inspector2#MapValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 256 + } + } + }, + "com.amazonaws.inspector2#Member": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID of the member account.

                                                                      " + } + }, + "relationshipStatus": { + "target": "com.amazonaws.inspector2#RelationshipStatus", + "traits": { + "smithy.api#documentation": "

The status of the member account.

                                                                      " + } + }, + "delegatedAdminAccountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID of the Amazon Inspector delegated administrator for this member account.

                                                                      " + } + }, + "updatedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

A timestamp showing when the status of this member was last updated.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Details on a member account in your organization.

                                                                      " + } + }, + "com.amazonaws.inspector2#MemberList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Member" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.inspector2#MeteringAccountId": { + "type": "string", + "traits": { + "smithy.api#pattern": "[0-9]{12}" + } + }, + "com.amazonaws.inspector2#MeteringAccountIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#MeteringAccountId" + } + }, + "com.amazonaws.inspector2#MonthlyCostEstimate": { + "type": "double", + "traits": { + "smithy.api#range": { + "min": 0 + } + } + }, + "com.amazonaws.inspector2#NetworkPath": { + "type": "structure", + "members": { + "steps": { + "target": "com.amazonaws.inspector2#StepList", + "traits": { + "smithy.api#documentation": "

The details on the steps in the network path.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Information on the network path associated with a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#NetworkProtocol": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "TCP", + "name": "TCP" + }, + { + "value": "UDP", + "name": "UDP" + } + ] + } + }, + "com.amazonaws.inspector2#NetworkReachabilityDetails": { + "type": "structure", + "members": { + "openPortRange": { + "target": "com.amazonaws.inspector2#PortRange", + "traits": { + "smithy.api#documentation": "

An object that contains details about the open port range associated with a finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "protocol": { + "target": "com.amazonaws.inspector2#NetworkProtocol", + "traits": { + "smithy.api#documentation": "

The protocol associated with a finding.

                                                                      ", + "smithy.api#required": {} + } + }, + "networkPath": { + "target": "com.amazonaws.inspector2#NetworkPath", + "traits": { + "smithy.api#documentation": "

An object that contains details about a network path associated with a finding.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains the details of a network reachability finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "max": 1000000 + } + } + }, + "com.amazonaws.inspector2#NonEmptyString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + } + } + }, + "com.amazonaws.inspector2#NonEmptyStringList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#NonEmptyString" + } + }, + "com.amazonaws.inspector2#NumberFilter": { + "type": "structure", + "members": { + "upperInclusive": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The highest number to be included in the filter.

                                                                      " + } + }, + "lowerInclusive": { + "target": "smithy.api#Double", + "traits": { + "smithy.api#documentation": "

The lowest number to be included in the filter.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that describes the details of a number filter.

                                                                      " + } + }, + "com.amazonaws.inspector2#NumberFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#NumberFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#Operation": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENABLE_SCANNING", + "name": "ENABLE_SCANNING" + }, + { + "value": "DISABLE_SCANNING", + "name": "DISABLE_SCANNING" + }, + { + "value": "ENABLE_REPOSITORY", + "name": "ENABLE_REPOSITORY" + }, + { + "value": "DISABLE_REPOSITORY", + "name": "DISABLE_REPOSITORY" + } + ] + } + }, + "com.amazonaws.inspector2#OwnerId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 12, + "max": 34 + }, + "smithy.api#pattern": "(^\\d{12}$)|(^o-[a-z0-9]{10,32}$)" + } + }, + "com.amazonaws.inspector2#PackageAggregation": { + "type": "structure", + "members": { + "packageNames": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

The names of packages to aggregate findings on.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

The order to sort results by.

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#PackageSortBy", + "traits": { + "smithy.api#documentation": "

The value to sort results by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The details that define an aggregation based on operating system package type.

                                                                      " + } + }, + "com.amazonaws.inspector2#PackageAggregationResponse": { + "type": "structure", + "members": { + "packageName": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The name of the operating system package.

                                                                      ", + "smithy.api#required": {} + } + }, + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account associated with the findings.

                                                                      " + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

An object that contains the count of matched findings per severity.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

A response that contains the results of a finding aggregation by operating system package.

                                                                      " + } + }, + "com.amazonaws.inspector2#PackageArchitecture": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.inspector2#PackageEpoch": { + "type": "integer" + }, + "com.amazonaws.inspector2#PackageFilter": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.inspector2#StringFilter", + "traits": { + "smithy.api#documentation": "

An object that contains details on the name of the package to filter on.

                                                                      " + } + }, + "version": { + "target": "com.amazonaws.inspector2#StringFilter", + "traits": { + "smithy.api#documentation": "

The package version to filter on.

                                                                      " + } + }, + "epoch": { + "target": "com.amazonaws.inspector2#NumberFilter", + "traits": { + "smithy.api#documentation": "

An object that contains details on the package epoch to filter on.

                                                                      " + } + }, + "release": { + "target": "com.amazonaws.inspector2#StringFilter", + "traits": { + "smithy.api#documentation": "

An object that contains details on the package release to filter on.

                                                                      " + } + }, + "architecture": { + "target": "com.amazonaws.inspector2#StringFilter", + "traits": { + "smithy.api#documentation": "

An object that contains details on the package architecture type to filter on.

                                                                      " + } + }, + "sourceLayerHash": { + "target": "com.amazonaws.inspector2#StringFilter", + "traits": { + "smithy.api#documentation": "

An object that contains details on the source layer hash to filter on.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information on the details of a package filter.
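A sketch of filtering findings by package, assuming the generated ListFindingsCommand, that FilterCriteria exposes a "vulnerablePackages" PackageFilterList member, and that StringFilter supports an "EQUALS" comparison:

import { Inspector2Client, ListFindingsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// Findings that affect one specific package version. "vulnerablePackages"
// (FilterCriteria member) and the "EQUALS" comparison are assumptions.
async function findingsForPackage(name: string, version: string) {
  const { findings } = await client.send(
    new ListFindingsCommand({
      filterCriteria: {
        vulnerablePackages: [
          {
            name: { comparison: "EQUALS", value: name },
            version: { comparison: "EQUALS", value: version },
          },
        ],
      },
    })
  );
  return findings ?? [];
}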

                                                                      " + } + }, + "com.amazonaws.inspector2#PackageFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#PackageFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#PackageManager": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "BUNDLER", + "name": "BUNDLER" + }, + { + "value": "CARGO", + "name": "CARGO" + }, + { + "value": "COMPOSER", + "name": "COMPOSER" + }, + { + "value": "NPM", + "name": "NPM" + }, + { + "value": "NUGET", + "name": "NUGET" + }, + { + "value": "PIPENV", + "name": "PIPENV" + }, + { + "value": "POETRY", + "name": "POETRY" + }, + { + "value": "YARN", + "name": "YARN" + }, + { + "value": "GOBINARY", + "name": "GOBINARY" + }, + { + "value": "GOMOD", + "name": "GOMOD" + }, + { + "value": "JAR", + "name": "JAR" + }, + { + "value": "OS", + "name": "OS" + } + ] + } + }, + "com.amazonaws.inspector2#PackageName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#PackageRelease": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#PackageSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + } + ] + } + }, + "com.amazonaws.inspector2#PackageVersion": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#PackageVulnerabilityDetails": { + "type": "structure", + "members": { + "vulnerabilityId": { + "target": "com.amazonaws.inspector2#VulnerabilityId", + "traits": { + "smithy.api#documentation": "

The ID given to this vulnerability.

                                                                      ", + "smithy.api#required": {} + } + }, + "vulnerablePackages": { + "target": "com.amazonaws.inspector2#VulnerablePackageList", + "traits": { + "smithy.api#documentation": "

The packages impacted by this vulnerability.

                                                                      ", + "smithy.api#required": {} + } + }, + "source": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The source of the vulnerability information.

                                                                      ", + "smithy.api#required": {} + } + }, + "cvss": { + "target": "com.amazonaws.inspector2#CvssScoreList", + "traits": { + "smithy.api#documentation": "

An object that contains details about the CVSS score of a finding.

                                                                      " + } + }, + "relatedVulnerabilities": { + "target": "com.amazonaws.inspector2#VulnerabilityIdList", + "traits": { + "smithy.api#documentation": "

One or more vulnerabilities related to the one identified in this finding.

                                                                      " + } + }, + "sourceUrl": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

A URL to the source of the vulnerability information.

                                                                      " + } + }, + "vendorSeverity": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The severity the vendor has given to this vulnerability type.

                                                                      " + } + }, + "vendorCreatedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time that this vulnerability was first added to the vendor's database.

                                                                      " + } + }, + "vendorUpdatedAt": { + "target": "com.amazonaws.inspector2#DateTimeTimestamp", + "traits": { + "smithy.api#documentation": "

The date and time the vendor last updated this vulnerability in their database.

                                                                      " + } + }, + "referenceUrls": { + "target": "com.amazonaws.inspector2#NonEmptyStringList", + "traits": { + "smithy.api#documentation": "

One or more URLs that contain details about this vulnerability type.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Information about a package vulnerability finding.
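A sketch of reading these details off returned findings, assuming the generated ListFindingsCommand and that the Finding shape (defined elsewhere in this model) exposes a packageVulnerabilityDetails member:

import { Inspector2Client, ListFindingsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// Print the vulnerability ID, vendor severity, and impacted package names for
// each returned finding. packageVulnerabilityDetails is assumed to be the
// Finding member that carries this structure.
async function summarizeVulnerabilities() {
  const { findings } = await client.send(new ListFindingsCommand({}));
  for (const finding of findings ?? []) {
    const details = finding.packageVulnerabilityDetails;
    if (!details) continue;
    const packages = (details.vulnerablePackages ?? []).map((p) => p.name).join(", ");
    console.log(`${details.vulnerabilityId} [${details.vendorSeverity ?? "n/a"}]: ${packages}`);
  }
}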

                                                                      " + } + }, + "com.amazonaws.inspector2#Permission": { + "type": "structure", + "members": { + "service": { + "target": "com.amazonaws.inspector2#Service", + "traits": { + "smithy.api#documentation": "

The service that the permissions allow an account to perform the given operations for.

                                                                      ", + "smithy.api#required": {} + } + }, + "operation": { + "target": "com.amazonaws.inspector2#Operation", + "traits": { + "smithy.api#documentation": "

The operation that can be performed with the given permissions.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains information on the permissions an account has within Amazon Inspector.

                                                                      " + } + }, + "com.amazonaws.inspector2#Permissions": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Permission" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#Platform": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#Port": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 65535 + } + } + }, + "com.amazonaws.inspector2#PortRange": { + "type": "structure", + "members": { + "begin": { + "target": "com.amazonaws.inspector2#Port", + "traits": { + "smithy.api#documentation": "

The beginning port in a port range.

                                                                      ", + "smithy.api#required": {} + } + }, + "end": { + "target": "com.amazonaws.inspector2#Port", + "traits": { + "smithy.api#documentation": "

The ending port in a port range.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the port range associated with a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#PortRangeFilter": { + "type": "structure", + "members": { + "beginInclusive": { + "target": "com.amazonaws.inspector2#Port", + "traits": { + "smithy.api#documentation": "

The port number the port range begins at.

                                                                      " + } + }, + "endInclusive": { + "target": "com.amazonaws.inspector2#Port", + "traits": { + "smithy.api#documentation": "

The port number the port range ends at.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that describes the details of a port range filter.
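A sketch of using this filter, assuming the generated ListFindingsCommand and that FilterCriteria exposes a "portRange" PortRangeFilterList member:

import { Inspector2Client, ListFindingsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

// Findings whose open port range falls within the well-known ports.
// "portRange" as the FilterCriteria member name is an assumption.
async function wellKnownPortFindings() {
  const { findings } = await client.send(
    new ListFindingsCommand({
      filterCriteria: {
        portRange: [{ beginInclusive: 0, endInclusive: 1023 }],
      },
    })
  );
  return findings ?? [];
}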

                                                                      " + } + }, + "com.amazonaws.inspector2#PortRangeFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#PortRangeFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#Recommendation": { + "type": "structure", + "members": { + "text": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The recommended course of action to remediate the finding.

                                                                      " + } + }, + "Url": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The URL to the CVE remediation recommendations.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the recommended course of action to remediate the finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#RelationshipStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATED", + "name": "CREATED" + }, + { + "value": "INVITED", + "name": "INVITED" + }, + { + "value": "DISABLED", + "name": "DISABLED" + }, + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "REMOVED", + "name": "REMOVED" + }, + { + "value": "RESIGNED", + "name": "RESIGNED" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "EMAIL_VERIFICATION_IN_PROGRESS", + "name": "EMAIL_VERIFICATION_IN_PROGRESS" + }, + { + "value": "EMAIL_VERIFICATION_FAILED", + "name": "EMAIL_VERIFICATION_FAILED" + }, + { + "value": "REGION_DISABLED", + "name": "REGION_DISABLED" + }, + { + "value": "ACCOUNT_SUSPENDED", + "name": "ACCOUNT_SUSPENDED" + }, + { + "value": "CANNOT_CREATE_DETECTOR_IN_ORG_MASTER", + "name": "CANNOT_CREATE_DETECTOR_IN_ORG_MASTER" + } + ] + } + }, + "com.amazonaws.inspector2#Remediation": { + "type": "structure", + "members": { + "recommendation": { + "target": "com.amazonaws.inspector2#Recommendation", + "traits": { + "smithy.api#documentation": "

An object that contains information about the recommended course of action to remediate the finding.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Information on how to remediate a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#ReportFormat": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CSV", + "name": "CSV" + }, + { + "value": "JSON", + "name": "JSON" + } + ] + } + }, + "com.amazonaws.inspector2#ReportId": { + "type": "string", + "traits": { + "smithy.api#pattern": "\\b[a-f0-9]{8}\\b-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-\\b[a-f0-9]{12}\\b" + } + }, + "com.amazonaws.inspector2#ReportingErrorCode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + }, + { + "value": "INVALID_PERMISSIONS", + "name": "INVALID_PERMISSIONS" + } + ] + } + }, + "com.amazonaws.inspector2#RepositoryAggregation": { + "type": "structure", + "members": { + "repositories": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

The names of repositories to aggregate findings on.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

The order to sort results by.

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#RepositorySortBy", + "traits": { + "smithy.api#documentation": "

The value to sort results by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The details that define an aggregation based on repository.

                                                                      " + } + }, + "com.amazonaws.inspector2#RepositoryAggregationResponse": { + "type": "structure", + "members": { + "repository": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The name of the repository associated with the findings.

                                                                      ", + "smithy.api#required": {} + } + }, + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

The ID of the Amazon Web Services account associated with the findings.

                                                                      " + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

An object that represents the count of matched findings per severity.

                                                                      " + } + }, + "affectedImages": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

The number of container images impacted by the findings.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

A response that contains details on the results of a finding aggregation by repository.

                                                                      " + } + }, + "com.amazonaws.inspector2#RepositorySortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "AFFECTED_IMAGES", + "name": "AFFECTED_IMAGES" + } + ] + } + }, + "com.amazonaws.inspector2#Resource": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.inspector2#ResourceType", + "traits": { + "smithy.api#documentation": "

The type of resource.

                                                                      ", + "smithy.api#required": {} + } + }, + "id": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The ID of the resource.

                                                                      ", + "smithy.api#required": {} + } + }, + "partition": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The partition of the resource.

                                                                      " + } + }, + "region": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services Region the impacted resource is located in.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.inspector2#TagMap", + "traits": { + "smithy.api#documentation": "

The tags attached to the resource.

                                                                      " + } + }, + "details": { + "target": "com.amazonaws.inspector2#ResourceDetails", + "traits": { + "smithy.api#documentation": "

An object that contains details about the resource involved in a finding.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Details about the resource involved in a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#ResourceDetails": { + "type": "structure", + "members": { + "awsEc2Instance": { + "target": "com.amazonaws.inspector2#AwsEc2InstanceDetails", + "traits": { + "smithy.api#documentation": "

An object that contains details about the Amazon EC2 instance involved in the finding.

                                                                      " + } + }, + "awsEcrContainerImage": { + "target": "com.amazonaws.inspector2#AwsEcrContainerImageDetails", + "traits": { + "smithy.api#documentation": "

An object that contains details about the Amazon ECR container image involved in the finding.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains details about the resource involved in the finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#ResourceId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 10, + "max": 341 + }, + "smithy.api#pattern": "(^arn:.*:ecr:.*:\\d{12}:repository\\/(?:[a-z0-9]+(?:[._-][a-z0-9]+)*\\/)*[a-z0-9]+(?:[._-][a-z0-9]+)*(\\/sha256:[a-z0-9]{64})?$)|(^i-([a-z0-9]{8}|[a-z0-9]{17}|\\\\*)$)" + } + }, + "com.amazonaws.inspector2#ResourceList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Resource" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The operation tried to access an invalid resource. Make sure the resource is specified correctly.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.inspector2#ResourceScanMetadata": { + "type": "structure", + "members": { + "ecrRepository": { + "target": "com.amazonaws.inspector2#EcrRepositoryMetadata", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about the repository an Amazon ECR image resides in.

                                                                      " + } + }, + "ecrImage": { + "target": "com.amazonaws.inspector2#EcrContainerImageMetadata", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains details about the container metadata for an Amazon ECR image.

                                                                      " + } + }, + "ec2": { + "target": "com.amazonaws.inspector2#Ec2Metadata", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains metadata details for an Amazon EC2 instance.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains details about the metadata for an Amazon ECR or Amazon EC2 resource.

                                                                      " + } + }, + "com.amazonaws.inspector2#ResourceScanType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EC2", + "name": "EC2" + }, + { + "value": "ECR", + "name": "ECR" + } + ] + } + }, + "com.amazonaws.inspector2#ResourceState": { + "type": "structure", + "members": { + "ec2": { + "target": "com.amazonaws.inspector2#State", + "traits": { + "smithy.api#documentation": "

                                                                      An object detailing the state of Amazon Inspector scanning for Amazon EC2 resources.

                                                                      ", + "smithy.api#required": {} + } + }, + "ecr": { + "target": "com.amazonaws.inspector2#State", + "traits": { + "smithy.api#documentation": "

                                                                      An object detailing the state of Amazon Inspector scanning for Amazon ECR resources.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details the state of Amazon Inspector for each resource type Amazon Inspector scans.

                                                                      " + } + }, + "com.amazonaws.inspector2#ResourceStatus": { + "type": "structure", + "members": { + "ec2": { + "target": "com.amazonaws.inspector2#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The status of Amazon Inspector scanning for Amazon EC2 resources.

                                                                      ", + "smithy.api#required": {} + } + }, + "ecr": { + "target": "com.amazonaws.inspector2#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The status of Amazon Inspector scanning for Amazon ECR resources.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details the status of Amazon Inspector for each resource type Amazon Inspector scans.

                                                                      " + } + }, + "com.amazonaws.inspector2#ResourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS_EC2_INSTANCE", + "name": "AWS_EC2_INSTANCE" + }, + { + "value": "AWS_ECR_CONTAINER_IMAGE", + "name": "AWS_ECR_CONTAINER_IMAGE" + }, + { + "value": "AWS_ECR_REPOSITORY", + "name": "AWS_ECR_REPOSITORY" + } + ] + } + }, + "com.amazonaws.inspector2#ScanStatus": { + "type": "structure", + "members": { + "statusCode": { + "target": "com.amazonaws.inspector2#ScanStatusCode", + "traits": { + "smithy.api#documentation": "

                                                                      The status code of the scan.

                                                                      ", + "smithy.api#required": {} + } + }, + "reason": { + "target": "com.amazonaws.inspector2#ScanStatusReason", + "traits": { + "smithy.api#documentation": "

The reason for the scan status.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The status of the scan.

                                                                      " + } + }, + "com.amazonaws.inspector2#ScanStatusCode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "INACTIVE", + "name": "INACTIVE" + } + ] + } + }, + "com.amazonaws.inspector2#ScanStatusReason": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PENDING_INITIAL_SCAN", + "name": "PENDING_INITIAL_SCAN" + }, + { + "value": "ACCESS_DENIED", + "name": "ACCESS_DENIED" + }, + { + "value": "INTERNAL_ERROR", + "name": "INTERNAL_ERROR" + }, + { + "value": "UNMANAGED_EC2_INSTANCE", + "name": "UNMANAGED_EC2_INSTANCE" + }, + { + "value": "UNSUPPORTED_OS", + "name": "UNSUPPORTED_OS" + }, + { + "value": "SCAN_ELIGIBILITY_EXPIRED", + "name": "SCAN_ELIGIBILITY_EXPIRED" + }, + { + "value": "RESOURCE_TERMINATED", + "name": "RESOURCE_TERMINATED" + }, + { + "value": "SUCCESSFUL", + "name": "SUCCESSFUL" + }, + { + "value": "NO_RESOURCES_FOUND", + "name": "NO_RESOURCES_FOUND" + }, + { + "value": "IMAGE_SIZE_EXCEEDED", + "name": "IMAGE_SIZE_EXCEEDED" + }, + { + "value": "SCAN_FREQUENCY_MANUAL", + "name": "SCAN_FREQUENCY_MANUAL" + }, + { + "value": "SCAN_FREQUENCY_SCAN_ON_PUSH", + "name": "SCAN_FREQUENCY_SCAN_ON_PUSH" + }, + { + "value": "EC2_INSTANCE_STOPPED", + "name": "EC2_INSTANCE_STOPPED" + } + ] + } + }, + "com.amazonaws.inspector2#ScanType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "NETWORK", + "name": "NETWORK" + }, + { + "value": "PACKAGE", + "name": "PACKAGE" + } + ] + } + }, + "com.amazonaws.inspector2#Service": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EC2", + "name": "EC2" + }, + { + "value": "ECR", + "name": "ECR" + } + ] + } + }, + "com.amazonaws.inspector2#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the resource that exceeds a service quota.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      You have exceeded your service quota. To perform the requested action, remove some of\n the relevant resources, or use Service Quotas to request a service quota increase.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.inspector2#Severity": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "INFORMATIONAL", + "name": "INFORMATIONAL" + }, + { + "value": "LOW", + "name": "LOW" + }, + { + "value": "MEDIUM", + "name": "MEDIUM" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "UNTRIAGED", + "name": "UNTRIAGED" + } + ] + } + }, + "com.amazonaws.inspector2#SeverityCounts": { + "type": "structure", + "members": { + "all": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The total count of findings from all severities.

                                                                      " + } + }, + "medium": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The total count of medium severity findings.

                                                                      " + } + }, + "high": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The total count of high severity findings.

                                                                      " + } + }, + "critical": { + "target": "smithy.api#Long", + "traits": { + "smithy.api#documentation": "

                                                                      The total count of critical severity findings.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains the counts of aggregated findings per severity.

                                                                      " + } + }, + "com.amazonaws.inspector2#SortCriteria": { + "type": "structure", + "members": { + "field": { + "target": "com.amazonaws.inspector2#SortField", + "traits": { + "smithy.api#documentation": "

                                                                      The finding detail field by which results are sorted.

                                                                      ", + "smithy.api#required": {} + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

                                                                      The order by which findings are sorted.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details about the criteria used to sort finding results.

                                                                      " + } + }, + "com.amazonaws.inspector2#SortField": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS_ACCOUNT_ID", + "name": "AWS_ACCOUNT_ID" + }, + { + "value": "FINDING_TYPE", + "name": "FINDING_TYPE" + }, + { + "value": "SEVERITY", + "name": "SEVERITY" + }, + { + "value": "FIRST_OBSERVED_AT", + "name": "FIRST_OBSERVED_AT" + }, + { + "value": "LAST_OBSERVED_AT", + "name": "LAST_OBSERVED_AT" + }, + { + "value": "FINDING_STATUS", + "name": "FINDING_STATUS" + }, + { + "value": "RESOURCE_TYPE", + "name": "RESOURCE_TYPE" + }, + { + "value": "ECR_IMAGE_PUSHED_AT", + "name": "ECR_IMAGE_PUSHED_AT" + }, + { + "value": "ECR_IMAGE_REPOSITORY_NAME", + "name": "ECR_IMAGE_REPOSITORY_NAME" + }, + { + "value": "ECR_IMAGE_REGISTRY", + "name": "ECR_IMAGE_REGISTRY" + }, + { + "value": "NETWORK_PROTOCOL", + "name": "NETWORK_PROTOCOL" + }, + { + "value": "COMPONENT_TYPE", + "name": "COMPONENT_TYPE" + }, + { + "value": "VULNERABILITY_ID", + "name": "VULNERABILITY_ID" + }, + { + "value": "VULNERABILITY_SOURCE", + "name": "VULNERABILITY_SOURCE" + }, + { + "value": "INSPECTOR_SCORE", + "name": "INSPECTOR_SCORE" + }, + { + "value": "VENDOR_SEVERITY", + "name": "VENDOR_SEVERITY" + } + ] + } + }, + "com.amazonaws.inspector2#SortOrder": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ASC", + "name": "ASC" + }, + { + "value": "DESC", + "name": "DESC" + } + ] + } + }, + "com.amazonaws.inspector2#SourceLayerHash": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 71, + "max": 71 + }, + "smithy.api#pattern": "^sha256:[a-z0-9]{64}$" + } + }, + "com.amazonaws.inspector2#State": { + "type": "structure", + "members": { + "status": { + "target": "com.amazonaws.inspector2#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The status of Amazon Inspector for the account.

                                                                      ", + "smithy.api#required": {} + } + }, + "errorCode": { + "target": "com.amazonaws.inspector2#ErrorCode", + "traits": { + "smithy.api#documentation": "

                                                                      The error code explaining why the account failed to enable Amazon Inspector.

                                                                      ", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The error message received when the account failed to enable Amazon Inspector.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that describes the state of Amazon Inspector scans for an account.

                                                                      " + } + }, + "com.amazonaws.inspector2#Status": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENABLING", + "name": "ENABLING" + }, + { + "value": "ENABLED", + "name": "ENABLED" + }, + { + "value": "DISABLING", + "name": "DISABLING" + }, + { + "value": "DISABLED", + "name": "DISABLED" + }, + { + "value": "SUSPENDING", + "name": "SUSPENDING" + }, + { + "value": "SUSPENDED", + "name": "SUSPENDED" + } + ] + } + }, + "com.amazonaws.inspector2#Step": { + "type": "structure", + "members": { + "componentId": { + "target": "com.amazonaws.inspector2#Component", + "traits": { + "smithy.api#documentation": "

                                                                      The component ID.

                                                                      ", + "smithy.api#required": {} + } + }, + "componentType": { + "target": "com.amazonaws.inspector2#ComponentType", + "traits": { + "smithy.api#documentation": "

                                                                      The component type.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details about the step associated with a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#StepList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Step" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 30 + } + } + }, + "com.amazonaws.inspector2#StringComparison": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EQUALS", + "name": "EQUALS" + }, + { + "value": "PREFIX", + "name": "PREFIX" + }, + { + "value": "NOT_EQUALS", + "name": "NOT_EQUALS" + } + ] + } + }, + "com.amazonaws.inspector2#StringFilter": { + "type": "structure", + "members": { + "comparison": { + "target": "com.amazonaws.inspector2#StringComparison", + "traits": { + "smithy.api#documentation": "

The operator to use when comparing values in the filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.inspector2#StringInput", + "traits": { + "smithy.api#documentation": "

                                                                      The value to filter on.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that describes the details of a string filter.
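As a rough usage sketch, StringFilter and the SortCriteria shape defined just above are typically passed to ListFindings as filterCriteria and sortCriteria. The FilterCriteria member names severity and ecrImageRepositoryName are assumptions drawn from the wider model rather than from these lines.

import { Inspector2Client, ListFindingsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function listCriticalFindings(): Promise<void> {
  const { findings } = await client.send(
    new ListFindingsCommand({
      // Each StringFilter pairs a comparison (EQUALS, PREFIX, NOT_EQUALS) with a value.
      filterCriteria: {
        severity: [{ comparison: "EQUALS", value: "CRITICAL" }],
        ecrImageRepositoryName: [{ comparison: "PREFIX", value: "team-a/" }],
      },
      // SortCriteria picks one SortField and an ASC/DESC order.
      sortCriteria: { field: "INSPECTOR_SCORE", sortOrder: "DESC" },
    })
  );
  console.log(findings?.length ?? 0, "critical findings");
}

listCriticalFindings().catch(console.error);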

                                                                      " + } + }, + "com.amazonaws.inspector2#StringFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#StringFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.inspector2#StringInput": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, + "com.amazonaws.inspector2#StringList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#NonEmptyString" + } + }, + "com.amazonaws.inspector2#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(?!aws:)[a-zA-Z+-=._:/]+$" + } + }, + "com.amazonaws.inspector2#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.inspector2#TagList": { + "type": "list", + "member": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.inspector2#TagMap": { + "type": "map", + "key": { + "target": "com.amazonaws.inspector2#MapKey" + }, + "value": { + "target": "com.amazonaws.inspector2#MapValue" + } + }, + "com.amazonaws.inspector2#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#BadRequestException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Adds tags to a resource.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/tags/{resourceArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.inspector2#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.inspector2#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource to apply a tag to.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.inspector2#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The tags to be added to a resource.
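A minimal sketch of calling the tagging operations through the generated client follows; the resource ARN is a placeholder, and the command names are assumed to follow the usual <OperationName>Command convention of the generated packages.

import {
  Inspector2Client,
  TagResourceCommand,
  UntagResourceCommand,
} from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});
// Placeholder ARN; substitute a real Amazon Inspector resource ARN.
const resourceArn = "arn:aws:inspector2:us-east-1:111122223333:owner/111122223333/filter/example";

async function manageTags(): Promise<void> {
  // TagResource takes a map of tag keys to values.
  await client.send(new TagResourceCommand({ resourceArn, tags: { team: "security", env: "prod" } }));
  // UntagResource sends the keys to remove as the `tagKeys` query parameter.
  await client.send(new UntagResourceCommand({ resourceArn, tagKeys: ["env"] }));
}

manageTags().catch(console.error);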

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#TagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.inspector2#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The limit on the number of requests per second was exceeded.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.inspector2#TitleAggregation": { + "type": "structure", + "members": { + "titles": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The finding titles to aggregate on.

                                                                      " + } + }, + "vulnerabilityIds": { + "target": "com.amazonaws.inspector2#StringFilterList", + "traits": { + "smithy.api#documentation": "

                                                                      The vulnerability IDs of the findings.

                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.inspector2#AggregationResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The resource type to aggregate on.

                                                                      " + } + }, + "sortOrder": { + "target": "com.amazonaws.inspector2#SortOrder", + "traits": { + "smithy.api#documentation": "

                                                                      The order to sort results by.

                                                                      " + } + }, + "sortBy": { + "target": "com.amazonaws.inspector2#TitleSortBy", + "traits": { + "smithy.api#documentation": "

                                                                      The value to sort results by.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The details that define an aggregation based on finding title.

                                                                      " + } + }, + "com.amazonaws.inspector2#TitleAggregationResponse": { + "type": "structure", + "members": { + "title": { + "target": "com.amazonaws.inspector2#NonEmptyString", + "traits": { + "smithy.api#documentation": "

                                                                      The title that the findings were aggregated on.

                                                                      ", + "smithy.api#required": {} + } + }, + "vulnerabilityId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The vulnerability ID of the finding.

                                                                      " + } + }, + "accountId": { + "target": "com.amazonaws.inspector2#AccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the Amazon Web Services account associated with the findings.

                                                                      " + } + }, + "severityCounts": { + "target": "com.amazonaws.inspector2#SeverityCounts", + "traits": { + "smithy.api#documentation": "

An object that represents the count of matched findings per severity.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A response that contains details on the results of a finding aggregation by title.
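A hedged sketch of requesting a title aggregation: this assumes the aggregation operation is exposed as ListFindingAggregationsCommand, that TitleAggregation is passed inside an aggregationRequest union keyed by titleAggregation, and that the matching TitleAggregationResponse entries come back under the same key; none of that wrapper shape is spelled out in these lines.

import { Inspector2Client, ListFindingAggregationsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function aggregateByTitle(): Promise<void> {
  const { responses } = await client.send(
    new ListFindingAggregationsCommand({
      aggregationType: "TITLE",
      aggregationRequest: {
        titleAggregation: {
          resourceType: "AWS_ECR_CONTAINER_IMAGE",
          sortBy: "CRITICAL",
          sortOrder: "DESC",
        },
      },
    })
  );
  for (const response of responses ?? []) {
    const agg = response.titleAggregation;
    console.log(agg?.title, agg?.vulnerabilityId, agg?.severityCounts?.critical);
  }
}

aggregateByTitle().catch(console.error);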

                                                                      " + } + }, + "com.amazonaws.inspector2#TitleSortBy": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CRITICAL", + "name": "CRITICAL" + }, + { + "value": "HIGH", + "name": "HIGH" + }, + { + "value": "ALL", + "name": "ALL" + } + ] + } + }, + "com.amazonaws.inspector2#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Removes tags from a resource.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{resourceArn}", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.inspector2#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) for the resource to remove tags from.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.inspector2#TagKeyList", + "traits": { + "smithy.api#documentation": "

                                                                      The tag keys to remove from the resource.

                                                                      ", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.inspector2#UpdateFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#UpdateFilterRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#UpdateFilterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates an existing filter, including the action that is applied to the findings that match it.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/filters/update", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#UpdateFilterRequest": { + "type": "structure", + "members": { + "action": { + "target": "com.amazonaws.inspector2#FilterAction", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the action that is to be applied to the findings that match the filter.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.inspector2#FilterDescription", + "traits": { + "smithy.api#documentation": "

                                                                      A description of the filter.

                                                                      " + } + }, + "filterCriteria": { + "target": "com.amazonaws.inspector2#FilterCriteria", + "traits": { + "smithy.api#documentation": "

Defines the criteria to be updated in the filter.

                                                                      " + } + }, + "name": { + "target": "com.amazonaws.inspector2#FilterName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the filter.

                                                                      " + } + }, + "filterArn": { + "target": "com.amazonaws.inspector2#FilterArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the filter to update.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#UpdateFilterResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.inspector2#FilterArn", + "traits": { + "smithy.api#documentation": "

The Amazon Resource Name (ARN) of the successfully updated filter.
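A minimal UpdateFilter sketch, assuming the FilterAction enum includes SUPPRESS and that only filterArn is required, as modeled above; the filter ARN is a placeholder.

import { Inspector2Client, UpdateFilterCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function suppressLowSeverity(): Promise<void> {
  const { arn } = await client.send(
    new UpdateFilterCommand({
      // Placeholder filter ARN.
      filterArn: "arn:aws:inspector2:us-east-1:111122223333:owner/111122223333/filter/example",
      action: "SUPPRESS",
      description: "Suppress low-severity findings from the build account",
      filterCriteria: {
        severity: [{ comparison: "EQUALS", value: "LOW" }],
      },
    })
  );
  console.log("updated filter:", arn);
}

suppressLowSeverity().catch(console.error);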

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#UpdateOrganizationConfiguration": { + "type": "operation", + "input": { + "target": "com.amazonaws.inspector2#UpdateOrganizationConfigurationRequest" + }, + "output": { + "target": "com.amazonaws.inspector2#UpdateOrganizationConfigurationResponse" + }, + "errors": [ + { + "target": "com.amazonaws.inspector2#AccessDeniedException" + }, + { + "target": "com.amazonaws.inspector2#InternalServerException" + }, + { + "target": "com.amazonaws.inspector2#ThrottlingException" + }, + { + "target": "com.amazonaws.inspector2#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates the configurations for your Amazon Inspector organization.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/organizationconfiguration/update", + "code": 200 + } + } + }, + "com.amazonaws.inspector2#UpdateOrganizationConfigurationRequest": { + "type": "structure", + "members": { + "autoEnable": { + "target": "com.amazonaws.inspector2#AutoEnable", + "traits": { + "smithy.api#documentation": "

                                                                      Defines which scan types are enabled automatically for new members of your Amazon Inspector organization.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#UpdateOrganizationConfigurationResponse": { + "type": "structure", + "members": { + "autoEnable": { + "target": "com.amazonaws.inspector2#AutoEnable", + "traits": { + "smithy.api#documentation": "

                                                                      The updated status of scan types automatically enabled for new members of your Amazon Inspector organization.
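A sketch of updating the organization configuration, assuming AutoEnable is the { ec2, ecr } boolean structure defined elsewhere in this model and that the call is made from the Amazon Inspector delegated administrator account.

import {
  Inspector2Client,
  UpdateOrganizationConfigurationCommand,
} from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function autoEnableNewMembers(): Promise<void> {
  // Automatically enable EC2 and ECR scanning for accounts that join the organization.
  const { autoEnable } = await client.send(
    new UpdateOrganizationConfigurationCommand({ autoEnable: { ec2: true, ecr: true } })
  );
  console.log("auto-enable is now:", autoEnable);
}

autoEnableNewMembers().catch(console.error);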

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.inspector2#Usage": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.inspector2#UsageType", + "traits": { + "smithy.api#documentation": "

The type of scan.

                                                                      " + } + }, + "total": { + "target": "com.amazonaws.inspector2#UsageValue", + "traits": { + "smithy.api#documentation": "

The total amount of usage.

                                                                      " + } + }, + "estimatedMonthlyCost": { + "target": "com.amazonaws.inspector2#MonthlyCostEstimate", + "traits": { + "smithy.api#documentation": "

                                                                      The estimated monthly cost of Amazon Inspector.

                                                                      " + } + }, + "currency": { + "target": "com.amazonaws.inspector2#Currency", + "traits": { + "smithy.api#documentation": "

                                                                      The currency type used when calculating usage data.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

Contains usage information about the cost of Amazon Inspector operations.

                                                                      " + } + }, + "com.amazonaws.inspector2#UsageAccountId": { + "type": "string", + "traits": { + "smithy.api#pattern": "[0-9]{12}" + } + }, + "com.amazonaws.inspector2#UsageAccountIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#UsageAccountId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5000 + } + } + }, + "com.amazonaws.inspector2#UsageList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#Usage" + } + }, + "com.amazonaws.inspector2#UsageTotal": { + "type": "structure", + "members": { + "accountId": { + "target": "com.amazonaws.inspector2#MeteringAccountId", + "traits": { + "smithy.api#documentation": "

                                                                      The account ID of the account that usage data was retrieved for.

                                                                      " + } + }, + "usage": { + "target": "com.amazonaws.inspector2#UsageList", + "traits": { + "smithy.api#documentation": "

                                                                      An object representing the total usage for an account.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The total usage for an account ID.
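The Usage and UsageTotal shapes are returned by the usage-listing operation; the sketch below assumes that operation is exposed as ListUsageTotalsCommand with an optional accountIds filter, which is not shown in these lines.

import { Inspector2Client, ListUsageTotalsCommand } from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function printUsage(): Promise<void> {
  const { totals } = await client.send(
    new ListUsageTotalsCommand({ accountIds: ["111122223333"] }) // placeholder account ID
  );
  for (const total of totals ?? []) {
    for (const usage of total.usage ?? []) {
      console.log(total.accountId, usage.type, usage.total, usage.estimatedMonthlyCost, usage.currency);
    }
  }
}

printUsage().catch(console.error);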

                                                                      " + } + }, + "com.amazonaws.inspector2#UsageTotalList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#UsageTotal" + } + }, + "com.amazonaws.inspector2#UsageType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EC2_INSTANCE_HOURS", + "name": "EC2_INSTANCE_HOURS" + }, + { + "value": "ECR_INITIAL_SCAN", + "name": "ECR_INITIAL_SCAN" + }, + { + "value": "ECR_RESCAN", + "name": "ECR_RESCAN" + } + ] + } + }, + "com.amazonaws.inspector2#UsageValue": { + "type": "double", + "traits": { + "smithy.api#range": { + "min": 0 + } + } + }, + "com.amazonaws.inspector2#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "reason": { + "target": "com.amazonaws.inspector2#ValidationExceptionReason", + "traits": { + "smithy.api#documentation": "

                                                                      The reason for the validation failure.

                                                                      ", + "smithy.api#required": {} + } + }, + "fields": { + "target": "com.amazonaws.inspector2#ValidationExceptionFields", + "traits": { + "smithy.api#documentation": "

                                                                      The fields that failed validation.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request has failed validation due to missing required fields or having invalid\n inputs.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.inspector2#ValidationExceptionField": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the validation exception.

                                                                      ", + "smithy.api#required": {} + } + }, + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The validation exception message.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that describes a validation exception.
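Because the generated v3 clients throw modeled exception classes, ValidationException can be caught and its reason and fields inspected as sketched below; the assumption is that the regenerated @aws-sdk/client-inspector2 exports the exception class with these members.

import {
  Inspector2Client,
  ListFindingsCommand,
  ValidationException,
} from "@aws-sdk/client-inspector2";

const client = new Inspector2Client({});

async function demonstrateValidationError(): Promise<void> {
  try {
    // Deliberately invalid input to trigger a ValidationException.
    await client.send(new ListFindingsCommand({ maxResults: 0 }));
  } catch (err) {
    if (err instanceof ValidationException) {
      console.error("reason:", err.reason);
      for (const field of err.fields ?? []) {
        console.error(`${field.name}: ${field.message}`);
      }
      return;
    }
    throw err;
  }
}

demonstrateValidationError().catch(console.error);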

                                                                      " + } + }, + "com.amazonaws.inspector2#ValidationExceptionFields": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#ValidationExceptionField" + } + }, + "com.amazonaws.inspector2#ValidationExceptionReason": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CANNOT_PARSE", + "name": "CANNOT_PARSE" + }, + { + "value": "FIELD_VALIDATION_FAILED", + "name": "FIELD_VALIDATION_FAILED" + }, + { + "value": "OTHER", + "name": "OTHER" + } + ] + } + }, + "com.amazonaws.inspector2#VulnerabilityId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + } + } + }, + "com.amazonaws.inspector2#VulnerabilityIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#VulnerabilityId" + } + }, + "com.amazonaws.inspector2#VulnerablePackage": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.inspector2#PackageName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the vulnerable package.

                                                                      ", + "smithy.api#required": {} + } + }, + "version": { + "target": "com.amazonaws.inspector2#PackageVersion", + "traits": { + "smithy.api#documentation": "

                                                                      The version of the vulnerable package.

                                                                      ", + "smithy.api#required": {} + } + }, + "sourceLayerHash": { + "target": "com.amazonaws.inspector2#SourceLayerHash", + "traits": { + "smithy.api#documentation": "

                                                                      The source layer hash of the vulnerable package.

                                                                      " + } + }, + "epoch": { + "target": "com.amazonaws.inspector2#PackageEpoch", + "traits": { + "smithy.api#documentation": "

                                                                      The epoch of the vulnerable package.

                                                                      " + } + }, + "release": { + "target": "com.amazonaws.inspector2#PackageRelease", + "traits": { + "smithy.api#documentation": "

                                                                      The release of the vulnerable package.

                                                                      " + } + }, + "arch": { + "target": "com.amazonaws.inspector2#PackageArchitecture", + "traits": { + "smithy.api#documentation": "

                                                                      The architecture of the vulnerable package.

                                                                      " + } + }, + "packageManager": { + "target": "com.amazonaws.inspector2#PackageManager", + "traits": { + "smithy.api#documentation": "

                                                                      The package manager of the vulnerable package.

                                                                      " + } + }, + "filePath": { + "target": "com.amazonaws.inspector2#FilePath", + "traits": { + "smithy.api#documentation": "

                                                                      The file path of the vulnerable package.

                                                                      " + } + }, + "fixedInVersion": { + "target": "com.amazonaws.inspector2#PackageVersion", + "traits": { + "smithy.api#documentation": "

                                                                      The version of the package that contains the vulnerability fix.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information on the vulnerable package identified by a finding.

                                                                      " + } + }, + "com.amazonaws.inspector2#VulnerablePackageList": { + "type": "list", + "member": { + "target": "com.amazonaws.inspector2#VulnerablePackage" + } + } + } +} diff --git a/codegen/sdk-codegen/aws-models/iot.json b/codegen/sdk-codegen/aws-models/iot.json index 934178e82a4c..a8f5e1b88073 100644 --- a/codegen/sdk-codegen/aws-models/iot.json +++ b/codegen/sdk-codegen/aws-models/iot.json @@ -10900,7 +10900,7 @@ "schema": { "target": "com.amazonaws.iot#IndexSchema", "traits": { - "smithy.api#documentation": "

                                                                      Contains a value that specifies the type of indexing performed. Valid values\n are:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        REGISTRY – Your thing index contains only registry data.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        REGISTRY_AND_SHADOW - Your thing index contains registry data and shadow data.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        REGISTRY_AND_CONNECTIVITY_STATUS - Your thing index contains registry data and\n thing connectivity status data.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        REGISTRY_AND_SHADOW_AND_CONNECTIVITY_STATUS - Your thing index contains registry\n data, shadow data, and thing connectivity status data.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      Contains a value that specifies the type of indexing performed. Valid values\n are:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        REGISTRY – Your thing index contains only registry data.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        REGISTRY_AND_SHADOW - Your thing index contains registry data and shadow data.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        REGISTRY_AND_CONNECTIVITY_STATUS - Your thing index contains registry data and\n thing connectivity status data.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        REGISTRY_AND_SHADOW_AND_CONNECTIVITY_STATUS - Your thing index contains registry\n data, shadow data, and thing connectivity status data.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        MULTI_INDEXING_MODE - Your thing index contains multiple data sources. For more information, see \n GetIndexingConfiguration.

                                                                        \n
                                                                      • \n
                                                                      " } } } @@ -12824,6 +12824,21 @@ ] } }, + "com.amazonaws.iot#DeviceDefenderIndexingMode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "OFF", + "name": "OFF" + }, + { + "value": "VIOLATIONS", + "name": "VIOLATIONS" + } + ] + } + }, "com.amazonaws.iot#DeviceDefenderThingName": { "type": "string", "traits": { @@ -22028,6 +22043,21 @@ } } }, + "com.amazonaws.iot#NamedShadowIndexingMode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "OFF", + "name": "OFF" + }, + { + "value": "ON", + "name": "ON" + } + ] + } + }, "com.amazonaws.iot#NamespaceId": { "type": "string", "traits": { @@ -26739,7 +26769,13 @@ "shadow": { "target": "com.amazonaws.iot#JsonDocument", "traits": { - "smithy.api#documentation": "

                                                                      The shadow.

                                                                      " + "smithy.api#documentation": "

The unnamed shadow and named shadow data.

                                                                      \n

                                                                      For more information about shadows, see IoT Device Shadow service.\n

                                                                      " + } + }, + "deviceDefender": { + "target": "com.amazonaws.iot#JsonDocument", + "traits": { + "smithy.api#documentation": "

                                                                      Contains Device Defender data.

                                                                      \n

                                                                      For more information about Device Defender, see Device Defender.
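To see the new shadow and deviceDefender members in practice, a fleet-indexing query sketch follows; it assumes the regenerated @aws-sdk/client-iot exposes SearchIndexCommand and that both members arrive as serialized JSON strings, as the JsonDocument target suggests.

import { IoTClient, SearchIndexCommand } from "@aws-sdk/client-iot";

const client = new IoTClient({});

async function searchThings(): Promise<void> {
  const { things } = await client.send(
    new SearchIndexCommand({ queryString: "thingName:tempSensor*" })
  );
  for (const thing of things ?? []) {
    // Shadow and Device Defender data are JSON documents on each ThingDocument.
    const shadow = thing.shadow ? JSON.parse(thing.shadow) : undefined;
    const deviceDefender = thing.deviceDefender ? JSON.parse(thing.deviceDefender) : undefined;
    console.log(thing.thingName, shadow, deviceDefender);
  }
}

searchThings().catch(console.error);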

                                                                      " } }, "connectivity": { @@ -26973,6 +27009,18 @@ "smithy.api#documentation": "

                                                                      Thing connectivity indexing mode. Valid values are:

                                                                      \n
                                                                        \n
                                                                      • \n

STATUS – Your thing index contains connectivity status. To enable thing\n connectivity indexing, thingIndexingMode must not be set to\n OFF.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        OFF - Thing connectivity status indexing is disabled.

                                                                        \n
                                                                      • \n
                                                                      " } }, + "deviceDefenderIndexingMode": { + "target": "com.amazonaws.iot#DeviceDefenderIndexingMode", + "traits": { + "smithy.api#documentation": "

                                                                      Device Defender indexing mode. Valid values are:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        VIOLATIONS – Your thing index contains Device Defender violations. To enable Device\n Defender indexing, deviceDefenderIndexingMode must not be set to\n OFF.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        OFF - Device Defender indexing is disabled.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      For more information about Device Defender violations, see Device Defender Detect.\n

                                                                      " + } + }, + "namedShadowIndexingMode": { + "target": "com.amazonaws.iot#NamedShadowIndexingMode", + "traits": { + "smithy.api#documentation": "

                                                                      Named shadow indexing mode. Valid values are:

                                                                      \n
                                                                        \n
                                                                      • \n

ON – Your thing index contains named shadow data. To enable\n named shadow indexing, namedShadowIndexingMode must not be set to\n OFF.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        OFF - Named shadow indexing is disabled.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      For more information about Shadows, see IoT Device Shadow service.\n
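A sketch of turning on the two new indexing modes alongside registry and shadow indexing, assuming the regenerated @aws-sdk/client-iot exposes UpdateIndexingConfigurationCommand with a thingIndexingConfiguration input member matching this shape.

import { IoTClient, UpdateIndexingConfigurationCommand } from "@aws-sdk/client-iot";

const client = new IoTClient({});

async function enableExtendedIndexing(): Promise<void> {
  await client.send(
    new UpdateIndexingConfigurationCommand({
      thingIndexingConfiguration: {
        thingIndexingMode: "REGISTRY_AND_SHADOW",
        thingConnectivityIndexingMode: "STATUS",
        // New in this model update:
        deviceDefenderIndexingMode: "VIOLATIONS",
        namedShadowIndexingMode: "ON",
      },
    })
  );
}

enableExtendedIndexing().catch(console.error);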

                                                                      " + } + }, "managedFields": { "target": "com.amazonaws.iot#Fields", "traits": { @@ -28892,7 +28940,7 @@ "unit": { "target": "com.amazonaws.iot#FleetMetricUnit", "traits": { - "smithy.api#documentation": "

                                                                      Used to support unit transformation such as milliseconds to seconds. The unit must be\n supported by CW metric.

                                                                      " + "smithy.api#documentation": "

Used to support unit transformation such as milliseconds to seconds. The unit must be\n supported by CloudWatch metrics.

                                                                      " } }, "expectedVersion": { diff --git a/codegen/sdk-codegen/aws-models/iotsitewise.json b/codegen/sdk-codegen/aws-models/iotsitewise.json index 01b35293eb91..93a7b0b21cea 100644 --- a/codegen/sdk-codegen/aws-models/iotsitewise.json +++ b/codegen/sdk-codegen/aws-models/iotsitewise.json @@ -2577,7 +2577,7 @@ "portalAuthMode": { "target": "com.amazonaws.iotsitewise#AuthMode", "traits": { - "smithy.api#documentation": "

                                                                      The service to use to authenticate users to the portal. Choose from the following\n options:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n SSO – The portal uses Amazon Web Services Single Sign On to authenticate users and manage\n user permissions. Before you can create a portal that uses Amazon Web Services SSO, you must enable Amazon Web Services SSO.\n For more information, see Enabling Amazon Web Services SSO in the\n IoT SiteWise User Guide. This option is only available in Amazon Web Services Regions other than\n the China Regions.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n IAM – The portal uses Identity and Access Management to authenticate users and manage\n user permissions. This option is only available in the China Regions.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      You can't change this value after you create a portal.

                                                                      \n

                                                                      Default: SSO\n

                                                                      " + "smithy.api#documentation": "

                                                                      The service to use to authenticate users to the portal. Choose from the following\n options:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n SSO – The portal uses Amazon Web Services Single Sign On to authenticate users and manage\n user permissions. Before you can create a portal that uses Amazon Web Services SSO, you must enable Amazon Web Services SSO.\n For more information, see Enabling Amazon Web Services SSO in the\n IoT SiteWise User Guide. This option is only available in Amazon Web Services Regions other than\n the China Regions.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n IAM – The portal uses Identity and Access Management to authenticate users and manage\n user permissions.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      You can't change this value after you create a portal.

                                                                      \n

                                                                      Default: SSO\n

                                                                      " } }, "notificationSenderEmail": { @@ -4560,7 +4560,7 @@ "storageType": { "target": "com.amazonaws.iotsitewise#StorageType", "traits": { - "smithy.api#documentation": "

The type of storage that you specified for your data. The storage type can be one of the following values:
SITEWISE_DEFAULT_STORAGE – IoT SiteWise replicates your data into a service managed database.
MULTI_LAYER_STORAGE – IoT SiteWise replicates your data into a service managed database and saves a copy of your raw data and metadata in an Amazon S3 object that you specified.
                                                                      ", + "smithy.api#documentation": "

The storage tier that you specified for your data. The storageType parameter can be one of the following values:
SITEWISE_DEFAULT_STORAGE – IoT SiteWise saves your data into the hot tier. The hot tier is a service-managed database.
MULTI_LAYER_STORAGE – IoT SiteWise saves your data in both the cold tier and the hot tier. The cold tier is a customer-managed Amazon S3 bucket.
                                                                      ", "smithy.api#required": {} } }, @@ -4576,6 +4576,12 @@ "smithy.api#documentation": "

Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values:
ENABLED – IoT SiteWise accepts time series that aren't associated with asset properties. After the disassociatedDataStorage is enabled, you can't disable it.
DISABLED – IoT SiteWise doesn't accept time series (data streams) that aren't associated with asset properties.
For more information, see Data streams in the IoT SiteWise User Guide.

                                                                      " } }, + "retentionPeriod": { + "target": "com.amazonaws.iotsitewise#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

                                                                      How many days your data is kept in the hot tier. By default, your data is kept indefinitely in the hot tier.

                                                                      " + } + }, "configurationStatus": { "target": "com.amazonaws.iotsitewise#ConfigurationStatus", "traits": { @@ -7311,6 +7317,15 @@ "smithy.api#pattern": "^[A-Za-z0-9+/=]+$" } }, + "com.amazonaws.iotsitewise#NumberOfDays": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 30 + } + } + }, "com.amazonaws.iotsitewise#Offset": { "type": "string", "traits": { @@ -7967,7 +7982,7 @@ "storageType": { "target": "com.amazonaws.iotsitewise#StorageType", "traits": { - "smithy.api#documentation": "

The type of storage that you specified for your data. The storage type can be one of the following values:
SITEWISE_DEFAULT_STORAGE – IoT SiteWise replicates your data into a service managed database.
MULTI_LAYER_STORAGE – IoT SiteWise replicates your data into a service managed database and saves a copy of your raw data and metadata in an Amazon S3 object that you specified.
                                                                      ", + "smithy.api#documentation": "

The storage tier that you specified for your data. The storageType parameter can be one of the following values:
SITEWISE_DEFAULT_STORAGE – IoT SiteWise saves your data into the hot tier. The hot tier is a service-managed database.
MULTI_LAYER_STORAGE – IoT SiteWise saves your data in both the cold tier and the hot tier. The cold tier is a customer-managed Amazon S3 bucket.
                                                                      ", "smithy.api#required": {} } }, @@ -7982,6 +7997,9 @@ "traits": { "smithy.api#documentation": "

Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values:
ENABLED – IoT SiteWise accepts time series that aren't associated with asset properties. After the disassociatedDataStorage is enabled, you can't disable it.
DISABLED – IoT SiteWise doesn't accept time series (data streams) that aren't associated with asset properties.
For more information, see Data streams in the IoT SiteWise User Guide.

                                                                      " } + }, + "retentionPeriod": { + "target": "com.amazonaws.iotsitewise#RetentionPeriod" } } }, @@ -7991,7 +8009,7 @@ "storageType": { "target": "com.amazonaws.iotsitewise#StorageType", "traits": { - "smithy.api#documentation": "

The type of storage that you specified for your data. The storage type can be one of the following values:
SITEWISE_DEFAULT_STORAGE – IoT SiteWise replicates your data into a service managed database.
MULTI_LAYER_STORAGE – IoT SiteWise replicates your data into a service managed database and saves a copy of your raw data and metadata in an Amazon S3 object that you specified.
                                                                      ", + "smithy.api#documentation": "

The storage tier that you specified for your data. The storageType parameter can be one of the following values:
SITEWISE_DEFAULT_STORAGE – IoT SiteWise saves your data into the hot tier. The hot tier is a service-managed database.
MULTI_LAYER_STORAGE – IoT SiteWise saves your data in both the cold tier and the hot tier. The cold tier is a customer-managed Amazon S3 bucket.
                                                                      ", "smithy.api#required": {} } }, @@ -8007,6 +8025,9 @@ "smithy.api#documentation": "

Contains the storage configuration for time series (data streams) that aren't associated with asset properties. The disassociatedDataStorage can be one of the following values:
ENABLED – IoT SiteWise accepts time series that aren't associated with asset properties. After the disassociatedDataStorage is enabled, you can't disable it.
DISABLED – IoT SiteWise doesn't accept time series (data streams) that aren't associated with asset properties.
For more information, see Data streams in the IoT SiteWise User Guide.

                                                                      " } }, + "retentionPeriod": { + "target": "com.amazonaws.iotsitewise#RetentionPeriod" + }, "configurationStatus": { "target": "com.amazonaws.iotsitewise#ConfigurationStatus", "traits": { @@ -8143,6 +8164,26 @@ ] } }, + "com.amazonaws.iotsitewise#RetentionPeriod": { + "type": "structure", + "members": { + "numberOfDays": { + "target": "com.amazonaws.iotsitewise#NumberOfDays", + "traits": { + "smithy.api#documentation": "

The number of days that your data is kept.
If you specified a value for this parameter, the unlimited parameter must be false.
                                                                      " + } + }, + "unlimited": { + "target": "com.amazonaws.iotsitewise#Unlimited", + "traits": { + "smithy.api#documentation": "

If true, your data is kept indefinitely.
If configured to true, you must not specify a value for the numberOfDays parameter.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      How many days your data is kept in the hot tier. By default, your data is kept indefinitely in the hot tier.
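Taken together, storageType, disassociatedDataStorage, and the new retentionPeriod shape are applied through the PutStorageConfiguration operation. Below is a minimal TypeScript sketch against @aws-sdk/client-iotsitewise; the multiLayerStorage and customerManagedS3Storage member names come from the wider IoT SiteWise model rather than this hunk, and all ARNs are placeholders.

```ts
import {
  IoTSiteWiseClient,
  PutStorageConfigurationCommand,
} from "@aws-sdk/client-iotsitewise";

async function configureStorage(): Promise<void> {
  const client = new IoTSiteWiseClient({ region: "us-east-1" }); // placeholder region
  const response = await client.send(
    new PutStorageConfigurationCommand({
      storageType: "MULTI_LAYER_STORAGE",
      multiLayerStorage: {
        customerManagedS3Storage: {
          s3ResourceArn: "arn:aws:s3:::example-sitewise-cold-tier",       // placeholder
          roleArn: "arn:aws:iam::123456789012:role/ExampleSiteWiseRole",  // placeholder
        },
      },
      disassociatedDataStorage: "ENABLED",
      // NumberOfDays has a minimum of 30; set either numberOfDays or unlimited, not both.
      retentionPeriod: { numberOfDays: 30 },
    })
  );
  console.log(response.configurationStatus);
}

configureStorage().catch(console.error);
```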

                                                                      " + } + }, "com.amazonaws.iotsitewise#SSOApplicationId": { "type": "string", "traits": { @@ -8573,6 +8614,12 @@ "smithy.api#httpError": 401 } }, + "com.amazonaws.iotsitewise#Unlimited": { + "type": "boolean", + "traits": { + "smithy.api#box": {} + } + }, "com.amazonaws.iotsitewise#UntagResource": { "type": "operation", "input": { diff --git a/codegen/sdk-codegen/aws-models/iottwinmaker.json b/codegen/sdk-codegen/aws-models/iottwinmaker.json new file mode 100644 index 000000000000..e827ae7677b2 --- /dev/null +++ b/codegen/sdk-codegen/aws-models/iottwinmaker.json @@ -0,0 +1,4422 @@ +{ + "smithy": "1.0", + "shapes": { + "com.amazonaws.iottwinmaker#AWSIoTTwinMaker": { + "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "IoTTwinMaker", + "arnNamespace": "awsiottwinmaker", + "cloudFormationName": "AWSIoTTwinMaker", + "cloudTrailEventSource": "awsiottwinmaker.amazonaws.com", + "endpointPrefix": "iottwinmaker" + }, + "aws.auth#sigv4": { + "name": "iottwinmaker" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "\n

TwinMaker is in public preview and is subject to change.
IoT TwinMaker is a service that enables you to build operational digital twins of physical systems. IoT TwinMaker overlays measurements and analysis from real-world sensors, cameras, and enterprise applications so you can create data visualizations to monitor your physical factory, building, or industrial plant. You can use this real-world data to monitor operations and diagnose and repair errors.

                                                                      ", + "smithy.api#title": "AWS IoT TwinMaker" + }, + "version": "2021-11-29", + "operations": [ + { + "target": "com.amazonaws.iottwinmaker#BatchPutPropertyValues" + }, + { + "target": "com.amazonaws.iottwinmaker#CreateComponentType" + }, + { + "target": "com.amazonaws.iottwinmaker#CreateEntity" + }, + { + "target": "com.amazonaws.iottwinmaker#CreateScene" + }, + { + "target": "com.amazonaws.iottwinmaker#CreateWorkspace" + }, + { + "target": "com.amazonaws.iottwinmaker#DeleteComponentType" + }, + { + "target": "com.amazonaws.iottwinmaker#DeleteEntity" + }, + { + "target": "com.amazonaws.iottwinmaker#DeleteScene" + }, + { + "target": "com.amazonaws.iottwinmaker#DeleteWorkspace" + }, + { + "target": "com.amazonaws.iottwinmaker#GetComponentType" + }, + { + "target": "com.amazonaws.iottwinmaker#GetEntity" + }, + { + "target": "com.amazonaws.iottwinmaker#GetPropertyValue" + }, + { + "target": "com.amazonaws.iottwinmaker#GetPropertyValueHistory" + }, + { + "target": "com.amazonaws.iottwinmaker#GetScene" + }, + { + "target": "com.amazonaws.iottwinmaker#GetWorkspace" + }, + { + "target": "com.amazonaws.iottwinmaker#ListComponentTypes" + }, + { + "target": "com.amazonaws.iottwinmaker#ListEntities" + }, + { + "target": "com.amazonaws.iottwinmaker#ListScenes" + }, + { + "target": "com.amazonaws.iottwinmaker#ListTagsForResource" + }, + { + "target": "com.amazonaws.iottwinmaker#ListWorkspaces" + }, + { + "target": "com.amazonaws.iottwinmaker#TagResource" + }, + { + "target": "com.amazonaws.iottwinmaker#UntagResource" + }, + { + "target": "com.amazonaws.iottwinmaker#UpdateComponentType" + }, + { + "target": "com.amazonaws.iottwinmaker#UpdateEntity" + }, + { + "target": "com.amazonaws.iottwinmaker#UpdateScene" + }, + { + "target": "com.amazonaws.iottwinmaker#UpdateWorkspace" + } + ] + }, + "com.amazonaws.iottwinmaker#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Access is denied.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.iottwinmaker#BatchPutPropertyError": { + "type": "structure", + "members": { + "errorCode": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

                                                                      The error code.

                                                                      ", + "smithy.api#required": {} + } + }, + "errorMessage": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

                                                                      The error message.

                                                                      ", + "smithy.api#required": {} + } + }, + "entry": { + "target": "com.amazonaws.iottwinmaker#PropertyValueEntry", + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains information about errors returned by the BatchPutProperty action.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An error returned by the BatchPutProperty action.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#BatchPutPropertyErrorEntry": { + "type": "structure", + "members": { + "errors": { + "target": "com.amazonaws.iottwinmaker#Errors", + "traits": { + "smithy.api#documentation": "

A list of objects that contain information about errors returned by the BatchPutProperty action.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains information about errors returned by the BatchPutProperty action.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#BatchPutPropertyValues": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#BatchPutPropertyValuesRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#BatchPutPropertyValuesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Sets values for multiple time series properties.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "data." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/entity-properties", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#BatchPutPropertyValuesRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the properties to set.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entries": { + "target": "com.amazonaws.iottwinmaker#Entries", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the property value entries to set. Each string in the mapping must be unique to this object.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#BatchPutPropertyValuesResponse": { + "type": "structure", + "members": { + "errorEntries": { + "target": "com.amazonaws.iottwinmaker#ErrorEntries", + "traits": { + "smithy.api#documentation": "

                                                                      Entries that caused errors in the batch put operation.
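As a usage sketch, the data-plane BatchPutPropertyValues call in the generated TypeScript client takes the workspaceId and entries members defined above. The PropertyValueEntry field names used below (entityPropertyReference, propertyValues, value, timestamp) come from the rest of the TwinMaker model, not this excerpt, and the IDs are placeholders.

```ts
import {
  IoTTwinMakerClient,
  BatchPutPropertyValuesCommand,
} from "@aws-sdk/client-iottwinmaker";

async function putSample(): Promise<void> {
  const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region
  const result = await client.send(
    new BatchPutPropertyValuesCommand({
      workspaceId: "example-workspace", // placeholder
      entries: [
        {
          // Field names assumed from the full TwinMaker model.
          entityPropertyReference: {
            entityId: "example-entity", // placeholder
            componentName: "temperatureSensor",
            propertyName: "temperature",
          },
          propertyValues: [{ value: { doubleValue: 22.5 }, timestamp: new Date() }],
        },
      ],
    })
  );
  // errorEntries lists any samples the service rejected.
  console.log(result.errorEntries);
}

putSample().catch(console.error);
```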

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#Boolean": { + "type": "boolean", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.iottwinmaker#ComponentRequest": { + "type": "structure", + "members": { + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the component request.

                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type.

                                                                      " + } + }, + "properties": { + "target": "com.amazonaws.iottwinmaker#PropertyRequests", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the properties to set in the component type. Each string in the mapping must be unique to this object.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that sets information about a component type create or update request.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ComponentResponse": { + "type": "structure", + "members": { + "componentName": { + "target": "com.amazonaws.iottwinmaker#Name", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the component.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the component type.

                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.iottwinmaker#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the component type.

                                                                      " + } + }, + "definedIn": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the property definition set in the request.

                                                                      " + } + }, + "properties": { + "target": "com.amazonaws.iottwinmaker#PropertyResponses", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the properties to set in the component type. Each string in the mapping must be unique to this object.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that returns information about a component type create or update request.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ComponentTypeId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z_\\.\\-0-9:]+$" + } + }, + "com.amazonaws.iottwinmaker#ComponentTypeSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeSummary" + } + }, + "com.amazonaws.iottwinmaker#ComponentTypeSummary": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the component type.

                                                                      ", + "smithy.api#required": {} + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type.

                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the component type was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the component type was last updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the component type.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.iottwinmaker#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The current status of the component type.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains information about a component type.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ComponentUpdateRequest": { + "type": "structure", + "members": { + "updateType": { + "target": "com.amazonaws.iottwinmaker#ComponentUpdateType", + "traits": { + "smithy.api#documentation": "

                                                                      The update type of the component update request.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the component type.

                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type.

                                                                      " + } + }, + "propertyUpdates": { + "target": "com.amazonaws.iottwinmaker#PropertyRequests", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the properties to set in the component type update. Each string in the mapping must be unique to this object.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The component update request.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ComponentUpdateType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATE", + "name": "CREATE" + }, + { + "value": "UPDATE", + "name": "UPDATE" + }, + { + "value": "DELETE", + "name": "DELETE" + } + ] + } + }, + "com.amazonaws.iottwinmaker#ComponentUpdatesMapRequest": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#ComponentUpdateRequest" + } + }, + "com.amazonaws.iottwinmaker#ComponentsMap": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#ComponentResponse" + } + }, + "com.amazonaws.iottwinmaker#ComponentsMapRequest": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#ComponentRequest" + } + }, + "com.amazonaws.iottwinmaker#Configuration": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#Value" + } + }, + "com.amazonaws.iottwinmaker#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A conflict occurred.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.iottwinmaker#ConnectorFailureException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The connector failed.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 424 + } + }, + "com.amazonaws.iottwinmaker#ConnectorTimeoutException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The connector timed out.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 424 + } + }, + "com.amazonaws.iottwinmaker#CreateComponentType": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#CreateComponentTypeRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#CreateComponentTypeResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConflictException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a component type.
TwinMaker is in public preview and is subject to change.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/component-types/{componentTypeId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#CreateComponentTypeRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the component type.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "isSingleton": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether an entity can have more than one component of this type.

                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the component type.

                                                                      " + } + }, + "propertyDefinitions": { + "target": "com.amazonaws.iottwinmaker#PropertyDefinitionsRequest", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the property definitions in the component type. Each string in the mapping must be unique to this object.

                                                                      " + } + }, + "extendsFrom": { + "target": "com.amazonaws.iottwinmaker#ExtendsFrom", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the parent component type to extend.

                                                                      " + } + }, + "functions": { + "target": "com.amazonaws.iottwinmaker#FunctionsRequest", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the functions in the component type. Each string in the mapping must be unique to this object.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.iottwinmaker#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      Metadata that you can use to manage the component type.
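A minimal sketch of calling this operation from the generated TypeScript client is shown below. The shape of each propertyDefinitions entry (dataType, isTimeSeries) is taken from the wider TwinMaker model rather than this excerpt, and the workspace and component type IDs are placeholders.

```ts
import {
  IoTTwinMakerClient,
  CreateComponentTypeCommand,
} from "@aws-sdk/client-iottwinmaker";

async function createSensorType(): Promise<void> {
  const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region
  await client.send(
    new CreateComponentTypeCommand({
      workspaceId: "example-workspace",                  // placeholder
      componentTypeId: "com.example.sensor.temperature", // placeholder
      description: "Temperature sensor readings",
      propertyDefinitions: {
        temperature: {
          dataType: { type: "DOUBLE" }, // Type enum value from the full model
          isTimeSeries: true,           // assumption: member name from the full model
        },
      },
      tags: { project: "example" },
    })
  );
}

createSensorType().catch(console.error);
```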

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#CreateComponentTypeResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the component type.

                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time when the component type was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "state": { + "target": "com.amazonaws.iottwinmaker#State", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the component type.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#CreateEntity": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#CreateEntityRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#CreateEntityResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConflictException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates an entity.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/entities", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#CreateEntityRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the entity.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity.

                                                                      " + } + }, + "entityName": { + "target": "com.amazonaws.iottwinmaker#EntityName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the entity.

                                                                      " + } + }, + "components": { + "target": "com.amazonaws.iottwinmaker#ComponentsMapRequest", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the components in the entity. Each string in the mapping must be unique to this object.

                                                                      " + } + }, + "parentEntityId": { + "target": "com.amazonaws.iottwinmaker#ParentEntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity's parent entity.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.iottwinmaker#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      Metadata that you can use to manage the entity.
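The request members above map directly onto the generated CreateEntityCommand. A minimal sketch follows, assuming @aws-sdk/client-iottwinmaker and the PropertyRequest value shape from the wider TwinMaker model; IDs and names are placeholders.

```ts
import { IoTTwinMakerClient, CreateEntityCommand } from "@aws-sdk/client-iottwinmaker";

async function createMixerEntity(): Promise<void> {
  const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region
  const response = await client.send(
    new CreateEntityCommand({
      workspaceId: "example-workspace", // placeholder
      entityName: "Mixer1",             // placeholder
      description: "Mixer on production line 1",
      components: {
        temperatureSensor: {
          componentTypeId: "com.example.sensor.temperature", // placeholder
          properties: {
            // PropertyRequest/DataValue shape assumed from the full model.
            temperature: { value: { doubleValue: 21.0 } },
          },
        },
      },
    })
  );
  console.log(response.entityId, response.state);
}

createMixerEntity().catch(console.error);
```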

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#CreateEntityResponse": { + "type": "structure", + "members": { + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the entity was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "state": { + "target": "com.amazonaws.iottwinmaker#State", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the entity.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#CreateScene": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#CreateSceneRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#CreateSceneResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConflictException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a scene.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/scenes", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#CreateSceneRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the scene.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sceneId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the scene.

                                                                      ", + "smithy.api#required": {} + } + }, + "contentLocation": { + "target": "com.amazonaws.iottwinmaker#S3Url", + "traits": { + "smithy.api#documentation": "

                                                                      The relative path that specifies the location of the content definition file.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description for this scene.

                                                                      " + } + }, + "capabilities": { + "target": "com.amazonaws.iottwinmaker#SceneCapabilities", + "traits": { + "smithy.api#documentation": "

                                                                      A list of capabilities that the scene uses to render itself.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.iottwinmaker#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      Metadata that you can use to manage the scene.
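All of the CreateScene request members above appear in the generated TypeScript client. A minimal sketch, with a placeholder workspace, scene ID, and S3 location:

```ts
import { IoTTwinMakerClient, CreateSceneCommand } from "@aws-sdk/client-iottwinmaker";

async function createFactoryScene(): Promise<void> {
  const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region
  const response = await client.send(
    new CreateSceneCommand({
      workspaceId: "example-workspace", // placeholder
      sceneId: "factory-floor",         // placeholder
      contentLocation: "s3://example-twinmaker-bucket/scenes/factory-floor.json", // placeholder
      description: "3D view of the factory floor",
    })
  );
  console.log(response.arn, response.creationDateTime);
}

createFactoryScene().catch(console.error);
```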

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#CreateSceneResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the scene.

                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the scene was created.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#CreateWorkspace": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#CreateWorkspaceRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#CreateWorkspaceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConflictException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a workspace.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#CreateWorkspaceRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the workspace.

                                                                      " + } + }, + "s3Location": { + "target": "com.amazonaws.iottwinmaker#S3Location", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the S3 bucket where resources associated with the workspace are stored.

                                                                      ", + "smithy.api#required": {} + } + }, + "role": { + "target": "com.amazonaws.iottwinmaker#RoleArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the execution role associated with the workspace.

                                                                      ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.iottwinmaker#TagMap", + "traits": { + "smithy.api#documentation": "

Metadata that you can use to manage the workspace.
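Because a workspace is the container for the entities, scenes, and component types above, a minimal CreateWorkspace sketch is included here; the bucket and role ARNs are placeholders.

```ts
import { IoTTwinMakerClient, CreateWorkspaceCommand } from "@aws-sdk/client-iottwinmaker";

async function createWorkspace(): Promise<void> {
  const client = new IoTTwinMakerClient({ region: "us-east-1" }); // placeholder region
  const response = await client.send(
    new CreateWorkspaceCommand({
      workspaceId: "example-workspace",                            // placeholder
      description: "Digital twin of the example plant",
      s3Location: "arn:aws:s3:::example-twinmaker-bucket",         // placeholder
      role: "arn:aws:iam::123456789012:role/ExampleTwinMakerRole", // placeholder
    })
  );
  console.log(response.arn, response.creationDateTime);
}

createWorkspace().catch(console.error);
```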

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#CreateWorkspaceResponse": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the workspace.

                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the workspace was created.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#DataConnector": { + "type": "structure", + "members": { + "lambda": { + "target": "com.amazonaws.iottwinmaker#LambdaFunction", + "traits": { + "smithy.api#documentation": "

                                                                      The Lambda function associated with this data connector.

                                                                      " + } + }, + "isNative": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value that specifies whether the data connector is native to TwinMaker.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The data connector.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#DataType": { + "type": "structure", + "members": { + "type": { + "target": "com.amazonaws.iottwinmaker#Type", + "traits": { + "smithy.api#documentation": "

                                                                      The underlying type of the data type.

                                                                      ", + "smithy.api#required": {} + } + }, + "nestedType": { + "target": "com.amazonaws.iottwinmaker#DataType", + "traits": { + "smithy.api#documentation": "

                                                                      The nested type in the data type.

                                                                      " + } + }, + "allowedValues": { + "target": "com.amazonaws.iottwinmaker#DataValueList", + "traits": { + "smithy.api#documentation": "

                                                                      The allowed values for this data type.

                                                                      " + } + }, + "unitOfMeasure": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

                                                                      The unit of measure used in this data type.

                                                                      " + } + }, + "relationship": { + "target": "com.amazonaws.iottwinmaker#Relationship", + "traits": { + "smithy.api#documentation": "

                                                                      A relationship that associates a component with another component.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that specifies the data type of a property.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#DataValue": { + "type": "structure", + "members": { + "booleanValue": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value.

                                                                      " + } + }, + "doubleValue": { + "target": "com.amazonaws.iottwinmaker#Double", + "traits": { + "smithy.api#documentation": "

                                                                      A double value.

                                                                      " + } + }, + "integerValue": { + "target": "com.amazonaws.iottwinmaker#Integer", + "traits": { + "smithy.api#documentation": "

                                                                      An integer value.

                                                                      " + } + }, + "longValue": { + "target": "com.amazonaws.iottwinmaker#Long", + "traits": { + "smithy.api#documentation": "

                                                                      A long value.

                                                                      " + } + }, + "stringValue": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

                                                                      A string value.

                                                                      " + } + }, + "listValue": { + "target": "com.amazonaws.iottwinmaker#DataValueList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of multiple values.

                                                                      " + } + }, + "mapValue": { + "target": "com.amazonaws.iottwinmaker#DataValueMap", + "traits": { + "smithy.api#documentation": "

                                                                      An object that maps strings to multiple DataValue objects.

                                                                      " + } + }, + "relationshipValue": { + "target": "com.amazonaws.iottwinmaker#RelationshipValue", + "traits": { + "smithy.api#documentation": "

                                                                      A value that relates a component to another component.

                                                                      " + } + }, + "expression": { + "target": "com.amazonaws.iottwinmaker#Expression", + "traits": { + "smithy.api#documentation": "

                                                                      An expression that produces the value.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that specifies a value for a property.
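For illustration, a DataValue sets exactly one of the variant fields above at a time; lists and maps nest further DataValue objects. A minimal TypeScript sketch using only the members defined in this shape (the values themselves are placeholders):

// A scalar value sets a single variant field.
const temperature = { doubleValue: 22.5 };

// DataValueList and DataValueMap entries are themselves DataValue objects,
// each limited to 50 entries by the length traits below.
const tags = { listValue: [{ stringValue: "pump" }, { stringValue: "line-4" }] };
const attributes = { mapValue: { alarmState: { stringValue: "NORMAL" } } };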

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#DataValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#DataValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.iottwinmaker#DataValueMap": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#String" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#DataValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.iottwinmaker#DeleteComponentType": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#DeleteComponentTypeRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#DeleteComponentTypeResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a component type.
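Assuming the generated TypeScript client follows this repository's usual naming (an IoTTwinMakerClient and a DeleteComponentTypeCommand in @aws-sdk/client-iottwinmaker; neither name appears in this model), a call might look like the following. The workspace and component type IDs are placeholders.

import { IoTTwinMakerClient, DeleteComponentTypeCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

// Maps to DELETE /workspaces/{workspaceId}/component-types/{componentTypeId}.
const { state } = await client.send(
  new DeleteComponentTypeCommand({
    workspaceId: "MyWorkspace",
    componentTypeId: "com.example.sensor",
  })
);
console.log(state); // current state of the component type being deleted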

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "DELETE", + "uri": "/workspaces/{workspaceId}/component-types/{componentTypeId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#DeleteComponentTypeRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the component type.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#DeleteComponentTypeResponse": { + "type": "structure", + "members": { + "state": { + "target": "com.amazonaws.iottwinmaker#State", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the component type to be deleted.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#DeleteEntity": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#DeleteEntityRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#DeleteEntityResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes an entity.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "DELETE", + "uri": "/workspaces/{workspaceId}/entities/{entityId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#DeleteEntityRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the entity to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "isRecursive": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value that specifies whether the operation deletes child entities.
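A hedged sketch of a recursive delete with the same assumed client package and command naming; the IDs are placeholders:

import { IoTTwinMakerClient, DeleteEntityCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });

// isRecursive is serialized as the ?isRecursive= query parameter; when true,
// child entities are deleted along with the target entity.
const { state } = await client.send(
  new DeleteEntityCommand({
    workspaceId: "MyWorkspace",
    entityId: "pump-station-1",
    isRecursive: true,
  })
);
console.log(state); // current state of the deleted entity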

                                                                      ", + "smithy.api#httpQuery": "isRecursive" + } + } + } + }, + "com.amazonaws.iottwinmaker#DeleteEntityResponse": { + "type": "structure", + "members": { + "state": { + "target": "com.amazonaws.iottwinmaker#State", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the deleted entity.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#DeleteScene": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#DeleteSceneRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#DeleteSceneResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a scene.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "DELETE", + "uri": "/workspaces/{workspaceId}/scenes/{sceneId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#DeleteSceneRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sceneId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the scene to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#DeleteSceneResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.iottwinmaker#DeleteWorkspace": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#DeleteWorkspaceRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#DeleteWorkspaceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a workspace.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "DELETE", + "uri": "/workspaces/{workspaceId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#DeleteWorkspaceRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#DeleteWorkspaceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.iottwinmaker#Description": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 512 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.iottwinmaker#Double": { + "type": "double", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.iottwinmaker#EntityId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}|^[a-zA-Z0-9][a-zA-Z_\\-0-9.:]*[a-zA-Z0-9]+$" + } + }, + "com.amazonaws.iottwinmaker#EntityName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z_0-9-.][a-zA-Z_0-9-. ]*[a-zA-Z0-9]+$" + } + }, + "com.amazonaws.iottwinmaker#EntityPropertyReference": { + "type": "structure", + "members": { + "componentName": { + "target": "com.amazonaws.iottwinmaker#Name", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the component.

                                                                      " + } + }, + "externalIdProperty": { + "target": "com.amazonaws.iottwinmaker#ExternalIdProperty", + "traits": { + "smithy.api#documentation": "

                                                                      A mapping of external IDs to property names. External IDs uniquely identify properties from external data stores.

                                                                      " + } + }, + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity.

                                                                      " + } + }, + "propertyName": { + "target": "com.amazonaws.iottwinmaker#Name", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the property.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that uniquely identifies an entity property.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#EntitySummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#EntitySummary" + } + }, + "com.amazonaws.iottwinmaker#EntitySummary": { + "type": "structure", + "members": { + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "entityName": { + "target": "com.amazonaws.iottwinmaker#EntityName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "parentEntityId": { + "target": "com.amazonaws.iottwinmaker#ParentEntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the parent entity.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.iottwinmaker#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The current status of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the entity.

                                                                      " + } + }, + "hasChildEntities": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the entity has child entities.

                                                                      " + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the entity was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time when the entity was last updated.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains information about an entity.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#Entries": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#PropertyValueEntry" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.iottwinmaker#ErrorCode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "VALIDATION_ERROR", + "name": "VALIDATION_ERROR" + }, + { + "value": "INTERNAL_FAILURE", + "name": "INTERNAL_FAILURE" + } + ] + } + }, + "com.amazonaws.iottwinmaker#ErrorDetails": { + "type": "structure", + "members": { + "code": { + "target": "com.amazonaws.iottwinmaker#ErrorCode", + "traits": { + "smithy.api#documentation": "

                                                                      The error code.

                                                                      " + } + }, + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage", + "traits": { + "smithy.api#documentation": "

                                                                      The error message.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The error details.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ErrorEntries": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#BatchPutPropertyErrorEntry" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.iottwinmaker#ErrorMessage": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + } + } + }, + "com.amazonaws.iottwinmaker#Errors": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#BatchPutPropertyError" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.iottwinmaker#ExceptionMessage": { + "type": "string" + }, + "com.amazonaws.iottwinmaker#Expression": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 316 + }, + "smithy.api#pattern": "^(^\\$\\{Parameters\\.[a-zA-z]+([a-zA-z_0-9]*)}$)$" + } + }, + "com.amazonaws.iottwinmaker#ExtendsFrom": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId" + } + }, + "com.amazonaws.iottwinmaker#ExternalIdProperty": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#String" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#String" + } + }, + "com.amazonaws.iottwinmaker#FunctionRequest": { + "type": "structure", + "members": { + "requiredProperties": { + "target": "com.amazonaws.iottwinmaker#RequiredProperties", + "traits": { + "smithy.api#documentation": "

                                                                      The required properties of the function.

                                                                      " + } + }, + "scope": { + "target": "com.amazonaws.iottwinmaker#Scope", + "traits": { + "smithy.api#documentation": "

                                                                      The scope of the function.

                                                                      " + } + }, + "implementedBy": { + "target": "com.amazonaws.iottwinmaker#DataConnector", + "traits": { + "smithy.api#documentation": "

                                                                      The data connector.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The function request body.
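For orientation, a FunctionsRequest map entry pairing a function name with a Lambda-backed data connector might look like this sketch. The map key, property name, and Lambda ARN are placeholders; only the member names come from the shapes above.

// Hypothetical FunctionsRequest entry: a "dataReader" function implemented
// by a Lambda data connector.
const functions = {
  dataReader: {
    requiredProperties: ["telemetryId"],
    implementedBy: {
      lambda: {
        arn: "arn:aws:lambda:us-east-1:123456789012:function:my-data-reader",
      },
      isNative: false,
    },
  },
};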

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#FunctionResponse": { + "type": "structure", + "members": { + "requiredProperties": { + "target": "com.amazonaws.iottwinmaker#RequiredProperties", + "traits": { + "smithy.api#documentation": "

                                                                      The required properties of the function.

                                                                      " + } + }, + "scope": { + "target": "com.amazonaws.iottwinmaker#Scope", + "traits": { + "smithy.api#documentation": "

                                                                      The scope of the function.

                                                                      " + } + }, + "implementedBy": { + "target": "com.amazonaws.iottwinmaker#DataConnector", + "traits": { + "smithy.api#documentation": "

                                                                      The data connector.

                                                                      " + } + }, + "isInherited": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Indicates whether this function is inherited.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The function response.

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#FunctionsRequest": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#FunctionRequest" + } + }, + "com.amazonaws.iottwinmaker#FunctionsResponse": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#FunctionResponse" + } + }, + "com.amazonaws.iottwinmaker#GetComponentType": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#GetComponentTypeRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#GetComponentTypeResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves information about a component type.
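A minimal read example under the same client-naming assumption (GetComponentTypeCommand in @aws-sdk/client-iottwinmaker; IDs are placeholders):

import { IoTTwinMakerClient, GetComponentTypeCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });
const componentType = await client.send(
  new GetComponentTypeCommand({
    workspaceId: "MyWorkspace",
    componentTypeId: "com.example.sensor",
  })
);
// Inspect the definition returned by GET /workspaces/{workspaceId}/component-types/{componentTypeId}.
console.log(componentType.isSingleton, componentType.propertyDefinitions);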

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "GET", + "uri": "/workspaces/{workspaceId}/component-types/{componentTypeId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#GetComponentTypeRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the component type.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#GetComponentTypeResponse": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the component type.

                                                                      ", + "smithy.api#required": {} + } + }, + "isSingleton": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value that specifies whether an entity can have more than one component of this\n type.

                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the component type.

                                                                      " + } + }, + "propertyDefinitions": { + "target": "com.amazonaws.iottwinmaker#PropertyDefinitionsResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that maps strings to the property definitions in the component type. Each string \n in the mapping must be unique to this object.

                                                                      " + } + }, + "extendsFrom": { + "target": "com.amazonaws.iottwinmaker#ExtendsFrom", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the parent component type that this component type extends.

                                                                      " + } + }, + "functions": { + "target": "com.amazonaws.iottwinmaker#FunctionsResponse", + "traits": { + "smithy.api#documentation": "

                                                                      An object that maps strings to the functions in the component type. Each string \n in the mapping must be unique to this object.

                                                                      " + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the component type was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time when the component type was last updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the component type.

                                                                      ", + "smithy.api#required": {} + } + }, + "isAbstract": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value that specifies whether the component type is abstract.

                                                                      " + } + }, + "isSchemaInitialized": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value that specifies whether the component type has a schema initializer and that the \n schema initializer has run.

                                                                      " + } + }, + "status": { + "target": "com.amazonaws.iottwinmaker#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The current status of the component type.

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#GetEntity": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#GetEntityRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#GetEntityResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves information about an entity.
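A hedged sketch of reading an entity with the assumed generated client (GetEntityCommand; IDs are placeholders):

import { IoTTwinMakerClient, GetEntityCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });
const entity = await client.send(
  new GetEntityCommand({ workspaceId: "MyWorkspace", entityId: "pump-station-1" })
);
// components is a map keyed by component name; parentEntityId and
// hasChildEntities describe the entity's place in the hierarchy.
console.log(entity.entityName, Object.keys(entity.components ?? {}));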

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "GET", + "uri": "/workspaces/{workspaceId}/entities/{entityId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#GetEntityRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#GetEntityResponse": { + "type": "structure", + "members": { + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "entityName": { + "target": "com.amazonaws.iottwinmaker#EntityName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "status": { + "target": "com.amazonaws.iottwinmaker#Status", + "traits": { + "smithy.api#documentation": "

                                                                      The current status of the entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the entity.

                                                                      " + } + }, + "components": { + "target": "com.amazonaws.iottwinmaker#ComponentsMap", + "traits": { + "smithy.api#documentation": "

                                                                      An object that maps strings to the components in the entity. Each string \n in the mapping must be unique to this object.

                                                                      " + } + }, + "parentEntityId": { + "target": "com.amazonaws.iottwinmaker#ParentEntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the parent entity for this entity.

                                                                      ", + "smithy.api#required": {} + } + }, + "hasChildEntities": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      A Boolean value that specifies whether the entity has associated child entities.

                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the entity was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the entity was last updated.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#GetPropertyValue": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#GetPropertyValueRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#GetPropertyValueResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConnectorFailureException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConnectorTimeoutException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the property values for a component, component type, entity, or workspace.

                                                                      \n

                                                                      You must specify a value for either componentName, componentTypeId, entityId, or workspaceId.
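For example, fetching the latest values of two properties on one component of an entity might look like this sketch (same assumed client package and command naming; IDs and property names are placeholders):

import { IoTTwinMakerClient, GetPropertyValueCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });
const { propertyValues } = await client.send(
  new GetPropertyValueCommand({
    workspaceId: "MyWorkspace",
    entityId: "pump-station-1",
    componentName: "flowSensor",
    selectedProperties: ["flowRate", "pressure"],
  })
);
// propertyValues maps each selected property name to its latest value.
console.log(propertyValues);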

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "data." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/entity-properties/value", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#GetPropertyValueHistory": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#GetPropertyValueHistoryRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#GetPropertyValueHistoryResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConnectorFailureException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConnectorTimeoutException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves information about the history of a time series property value for a component, component type, entity, or workspace.

                                                                      \n

You must specify a value for workspaceId. For entity-specific queries, specify values for componentName and \n entityId. For cross-entity queries, specify a value for componentTypeId.
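An entity-specific history query might look like the following sketch (assumed GetPropertyValueHistoryCommand naming; the "DESCENDING" OrderByTime value is an assumption, and IDs, property names, and dates are placeholders):

import {
  IoTTwinMakerClient,
  GetPropertyValueHistoryCommand,
} from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });
const history = await client.send(
  new GetPropertyValueHistoryCommand({
    workspaceId: "MyWorkspace",
    entityId: "pump-station-1",
    componentName: "flowSensor",
    selectedProperties: ["flowRate"],
    startDateTime: new Date("2021-11-01T00:00:00Z"),
    endDateTime: new Date("2021-11-02T00:00:00Z"),
    orderByTime: "DESCENDING", // assumed OrderByTime enum value
    maxResults: 25,
  })
);
// Pass history.nextToken back in a follow-up request to page through results.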

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "data." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/entity-properties/history", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.iottwinmaker#GetPropertyValueHistoryRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity.

                                                                      " + } + }, + "componentName": { + "target": "com.amazonaws.iottwinmaker#Name", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the component.

                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type.

                                                                      " + } + }, + "selectedProperties": { + "target": "com.amazonaws.iottwinmaker#SelectedPropertyList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of properties whose value histories the request retrieves.

                                                                      ", + "smithy.api#required": {} + } + }, + "propertyFilters": { + "target": "com.amazonaws.iottwinmaker#PropertyFilters", + "traits": { + "smithy.api#documentation": "

                                                                      A list of objects that filter the property value history request.

                                                                      " + } + }, + "startDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time of the earliest property value to return.

                                                                      ", + "smithy.api#required": {} + } + }, + "endDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time of the latest property value to return.

                                                                      ", + "smithy.api#required": {} + } + }, + "interpolation": { + "target": "com.amazonaws.iottwinmaker#InterpolationParameters", + "traits": { + "smithy.api#documentation": "

                                                                      An object that specifies the interpolation type and the interval over which to interpolate data.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The string that specifies the next page of results.

                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.iottwinmaker#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return.

                                                                      " + } + }, + "orderByTime": { + "target": "com.amazonaws.iottwinmaker#OrderByTime", + "traits": { + "smithy.api#documentation": "

                                                                      The time direction to use in the result order.

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#GetPropertyValueHistoryResponse": { + "type": "structure", + "members": { + "propertyValues": { + "target": "com.amazonaws.iottwinmaker#PropertyValueList", + "traits": { + "smithy.api#documentation": "

A list of objects that contain information about the values of the requested properties.

                                                                      ", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The string that specifies the next page of results.

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#GetPropertyValueRequest": { + "type": "structure", + "members": { + "componentName": { + "target": "com.amazonaws.iottwinmaker#Name", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the component whose property values the operation returns.

                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the component type whose property values the operation returns.

                                                                      " + } + }, + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity whose property values the operation returns.

                                                                      " + } + }, + "selectedProperties": { + "target": "com.amazonaws.iottwinmaker#SelectedPropertyList", + "traits": { + "smithy.api#documentation": "

                                                                      The properties whose values the operation returns.

                                                                      ", + "smithy.api#required": {} + } + }, + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace whose values the operation returns.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#GetPropertyValueResponse": { + "type": "structure", + "members": { + "propertyValues": { + "target": "com.amazonaws.iottwinmaker#PropertyLatestValueMap", + "traits": { + "smithy.api#documentation": "

                                                                      An object that maps strings to the properties and latest property values in the response. Each string \n in the mapping must be unique to this object.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#GetScene": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#GetSceneRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#GetSceneResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves information about a scene.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "GET", + "uri": "/workspaces/{workspaceId}/scenes/{sceneId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#GetSceneRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the scene.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sceneId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the scene.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#GetSceneResponse": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the scene.

                                                                      ", + "smithy.api#required": {} + } + }, + "sceneId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the scene.

                                                                      ", + "smithy.api#required": {} + } + }, + "contentLocation": { + "target": "com.amazonaws.iottwinmaker#S3Url", + "traits": { + "smithy.api#documentation": "

                                                                      The relative path that specifies the location of the content definition file.

                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the scene.

                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the scene was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the scene was last updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the scene.

                                                                      " + } + }, + "capabilities": { + "target": "com.amazonaws.iottwinmaker#SceneCapabilities", + "traits": { + "smithy.api#documentation": "

                                                                      A list of capabilities that the scene uses to render.

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#GetWorkspace": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#GetWorkspaceRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#GetWorkspaceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves information about a workspace.
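A short sketch with the same assumed client naming (GetWorkspaceCommand); the workspace ID is a placeholder:

import { IoTTwinMakerClient, GetWorkspaceCommand } from "@aws-sdk/client-iottwinmaker";

const client = new IoTTwinMakerClient({ region: "us-east-1" });
// The request's workspaceId member accepts either the workspace ID or its ARN (IdOrArn).
const workspace = await client.send(
  new GetWorkspaceCommand({ workspaceId: "MyWorkspace" })
);
console.log(workspace.s3Location, workspace.role);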

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "GET", + "uri": "/workspaces/{workspaceId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#GetWorkspaceRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#IdOrArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#GetWorkspaceResponse": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the workspace.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the workspace.

                                                                      " + } + }, + "s3Location": { + "target": "com.amazonaws.iottwinmaker#S3Location", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the S3 bucket where resources associated with the workspace are stored.

                                                                      ", + "smithy.api#required": {} + } + }, + "role": { + "target": "com.amazonaws.iottwinmaker#RoleArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the execution role associated with the workspace.

                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the workspace was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the workspace was last updated.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#Id": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z_0-9][a-zA-Z_\\-0-9]*[a-zA-Z0-9]+$" + } + }, + "com.amazonaws.iottwinmaker#IdOrArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^[a-zA-Z][a-zA-Z_\\-0-9]*[a-zA-Z0-9]+$|^arn:((aws)|(aws-cn)|(aws-us-gov)):iottwinmaker:[a-z0-9-]+:[0-9]{12}:[\\/a-zA-Z0-9_-]+$" + } + }, + "com.amazonaws.iottwinmaker#Integer": { + "type": "integer", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.iottwinmaker#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An unexpected error has occurred.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.iottwinmaker#InterpolationParameters": { + "type": "structure", + "members": { + "interpolationType": { + "target": "com.amazonaws.iottwinmaker#InterpolationType", + "traits": { + "smithy.api#documentation": "

                                                                      The interpolation type.

                                                                      " + } + }, + "intervalInSeconds": { + "target": "com.amazonaws.iottwinmaker#IntervalInSeconds", + "traits": { + "smithy.api#documentation": "

                                                                      The interpolation time interval in seconds.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that specifies how to interpolate data in a list.
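For example, a GetPropertyValueHistory request could ask for linearly interpolated values at a fixed interval; LINEAR is the only InterpolationType value defined in this model, and the 60-second interval is a placeholder:

// Interpolation settings to pass as the "interpolation" member of a
// GetPropertyValueHistory request.
const interpolation = {
  interpolationType: "LINEAR",
  intervalInSeconds: 60,
};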

                                                                      " + } + }, + "com.amazonaws.iottwinmaker#InterpolationType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "LINEAR", + "name": "LINEAR" + } + ] + } + }, + "com.amazonaws.iottwinmaker#IntervalInSeconds": { + "type": "long", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.iottwinmaker#LambdaArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:((aws)|(aws-cn)|(aws-us-gov)):lambda:[a-z0-9-]+:[0-9]{12}:function:[\\/a-zA-Z0-9_-]+$" + } + }, + "com.amazonaws.iottwinmaker#LambdaFunction": { + "type": "structure", + "members": { + "arn": { + "target": "com.amazonaws.iottwinmaker#LambdaArn", + "traits": { + "smithy.api#documentation": "

The ARN of the Lambda function.
                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

The Lambda function.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ListComponentTypes": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#ListComponentTypesRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#ListComponentTypesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all component types in a workspace.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/component-types-list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.iottwinmaker#ListComponentTypesFilter": { + "type": "union", + "members": { + "extendsFrom": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

The component type that the component types in the list extend.
                                                                      " + } + }, + "namespace": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

The namespace to which the component types in the list belong.
                                                                      " + } + }, + "isAbstract": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the component types in the list are abstract.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that filters items in a list of component types.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ListComponentTypesFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#ListComponentTypesFilter" + } + }, + "com.amazonaws.iottwinmaker#ListComponentTypesRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

The ID of the workspace.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "filters": { + "target": "com.amazonaws.iottwinmaker#ListComponentTypesFilters", + "traits": { + "smithy.api#documentation": "

A list of objects that filter the request.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.iottwinmaker#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to display.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListComponentTypesResponse": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

The ID of the workspace.
                                                                      ", + "smithy.api#required": {} + } + }, + "componentTypeSummaries": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects that contain information about the component types.
                                                                      ", + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.iottwinmaker#MaxResults", + "traits": { + "smithy.api#documentation": "

Specifies the maximum number of results to display.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListEntities": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#ListEntitiesRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#ListEntitiesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all entities in a workspace.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/entities-list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.iottwinmaker#ListEntitiesFilter": { + "type": "union", + "members": { + "parentEntityId": { + "target": "com.amazonaws.iottwinmaker#ParentEntityId", + "traits": { + "smithy.api#documentation": "

The parent of the entities in the list.
                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

The ID of the component type in the entities in the list.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that filters items in a list of entities.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ListEntitiesFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#ListEntitiesFilter" + } + }, + "com.amazonaws.iottwinmaker#ListEntitiesRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

The ID of the workspace.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "filters": { + "target": "com.amazonaws.iottwinmaker#ListEntitiesFilters", + "traits": { + "smithy.api#documentation": "

A list of objects that filter the request.
                                                                      " + } + }, + "maxResults": { + "target": "com.amazonaws.iottwinmaker#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to display.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListEntitiesResponse": { + "type": "structure", + "members": { + "entitySummaries": { + "target": "com.amazonaws.iottwinmaker#EntitySummaries", + "traits": { + "smithy.api#documentation": "

A list of objects that contain information about the entities.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListScenes": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#ListScenesRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#ListScenesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all scenes in a workspace.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces/{workspaceId}/scenes-list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.iottwinmaker#ListScenesRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

The ID of the workspace that contains the scenes.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.iottwinmaker#MaxResults", + "traits": { + "smithy.api#documentation": "

Specifies the maximum number of results to display.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListScenesResponse": { + "type": "structure", + "members": { + "sceneSummaries": { + "target": "com.amazonaws.iottwinmaker#SceneSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects that contain information about the scenes.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists all tags associated with a resource.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/tags-list", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceARN": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the resource.
                                                                      ", + "smithy.api#required": {} + } + }, + "maxResults": { + "target": "com.amazonaws.iottwinmaker#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to display.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.iottwinmaker#TagMap", + "traits": { + "smithy.api#documentation": "

Metadata that you can use to manage a resource.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListWorkspaces": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#ListWorkspacesRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#ListWorkspacesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Retrieves information about workspaces in the current account.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/workspaces-list", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + } + } + }, + "com.amazonaws.iottwinmaker#ListWorkspacesRequest": { + "type": "structure", + "members": { + "maxResults": { + "target": "com.amazonaws.iottwinmaker#MaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to display.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#ListWorkspacesResponse": { + "type": "structure", + "members": { + "workspaceSummaries": { + "target": "com.amazonaws.iottwinmaker#WorkspaceSummaries", + "traits": { + "smithy.api#documentation": "

A list of objects that contain information about the workspaces.
                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.iottwinmaker#NextToken", + "traits": { + "smithy.api#documentation": "

The string that specifies the next page of results.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#Long": { + "type": "long", + "traits": { + "smithy.api#box": {} + } + }, + "com.amazonaws.iottwinmaker#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.iottwinmaker#Name": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z_\\-0-9]+$" + } + }, + "com.amazonaws.iottwinmaker#NextToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 17880 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.iottwinmaker#OrderByTime": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ASCENDING", + "name": "ASCENDING" + }, + { + "value": "DESCENDING", + "name": "DESCENDING" + } + ] + } + }, + "com.amazonaws.iottwinmaker#ParentEntityId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^\\$ROOT|^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}|^[a-zA-Z0-9][a-zA-Z_\\-0-9.:]*[a-zA-Z0-9]+$" + } + }, + "com.amazonaws.iottwinmaker#ParentEntityUpdateRequest": { + "type": "structure", + "members": { + "updateType": { + "target": "com.amazonaws.iottwinmaker#ParentEntityUpdateType", + "traits": { + "smithy.api#documentation": "

The type of the update.
                                                                      ", + "smithy.api#required": {} + } + }, + "parentEntityId": { + "target": "com.amazonaws.iottwinmaker#ParentEntityId", + "traits": { + "smithy.api#documentation": "

The ID of the parent entity.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The parent entity update request.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#ParentEntityUpdateType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "UPDATE", + "name": "UPDATE" + }, + { + "value": "DELETE", + "name": "DELETE" + } + ] + } + }, + "com.amazonaws.iottwinmaker#PropertyDefinitionRequest": { + "type": "structure", + "members": { + "dataType": { + "target": "com.amazonaws.iottwinmaker#DataType", + "traits": { + "smithy.api#documentation": "

An object that contains information about the data type.
                                                                      " + } + }, + "isRequiredInEntity": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property is required.
                                                                      " + } + }, + "isExternalId": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property ID comes from an external data store.
                                                                      " + } + }, + "isStoredExternally": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property is stored externally.
                                                                      " + } + }, + "isTimeSeries": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property consists of time series data.
                                                                      " + } + }, + "defaultValue": { + "target": "com.amazonaws.iottwinmaker#DataValue", + "traits": { + "smithy.api#documentation": "

An object that contains the default value.
                                                                      " + } + }, + "configuration": { + "target": "com.amazonaws.iottwinmaker#Configuration", + "traits": { + "smithy.api#documentation": "

A mapping that specifies configuration information about the property. Use this field to\n specify information that you read from and write to an external source.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that sets information about a property.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyDefinitionResponse": { + "type": "structure", + "members": { + "dataType": { + "target": "com.amazonaws.iottwinmaker#DataType", + "traits": { + "smithy.api#documentation": "

An object that contains information about the data type.
                                                                      ", + "smithy.api#required": {} + } + }, + "isTimeSeries": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property consists of time series data.
                                                                      ", + "smithy.api#required": {} + } + }, + "isRequiredInEntity": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property is required in an entity.
                                                                      ", + "smithy.api#required": {} + } + }, + "isExternalId": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property ID comes from an external data store.
                                                                      ", + "smithy.api#required": {} + } + }, + "isStoredExternally": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property is stored externally.
                                                                      ", + "smithy.api#required": {} + } + }, + "isImported": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property definition is imported from an external data store.
                                                                      ", + "smithy.api#required": {} + } + }, + "isFinal": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property definition can be updated.
                                                                      ", + "smithy.api#required": {} + } + }, + "isInherited": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether the property definition is inherited from a parent entity.
                                                                      ", + "smithy.api#required": {} + } + }, + "defaultValue": { + "target": "com.amazonaws.iottwinmaker#DataValue", + "traits": { + "smithy.api#documentation": "

An object that contains the default value.
                                                                      " + } + }, + "configuration": { + "target": "com.amazonaws.iottwinmaker#Configuration", + "traits": { + "smithy.api#documentation": "

A mapping that specifies configuration information about the property.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains response data from a property definition request.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyDefinitionsRequest": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#PropertyDefinitionRequest" + } + }, + "com.amazonaws.iottwinmaker#PropertyDefinitionsResponse": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#PropertyDefinitionResponse" + } + }, + "com.amazonaws.iottwinmaker#PropertyFilter": { + "type": "structure", + "members": { + "propertyName": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

The property name associated with this property filter.
                                                                      " + } + }, + "operator": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

The operator associated with this property filter.
                                                                      " + } + }, + "value": { + "target": "com.amazonaws.iottwinmaker#DataValue", + "traits": { + "smithy.api#documentation": "

The value associated with this property filter.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that filters items returned by a property request.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#PropertyFilter" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.iottwinmaker#PropertyLatestValue": { + "type": "structure", + "members": { + "propertyReference": { + "target": "com.amazonaws.iottwinmaker#EntityPropertyReference", + "traits": { + "smithy.api#documentation": "

An object that specifies information about a property.
                                                                      ", + "smithy.api#required": {} + } + }, + "propertyValue": { + "target": "com.amazonaws.iottwinmaker#DataValue", + "traits": { + "smithy.api#documentation": "

The value of the property.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The latest value of the property.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyLatestValueMap": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#PropertyLatestValue" + } + }, + "com.amazonaws.iottwinmaker#PropertyRequest": { + "type": "structure", + "members": { + "definition": { + "target": "com.amazonaws.iottwinmaker#PropertyDefinitionRequest", + "traits": { + "smithy.api#documentation": "

An object that specifies information about a property.
                                                                      " + } + }, + "value": { + "target": "com.amazonaws.iottwinmaker#DataValue", + "traits": { + "smithy.api#documentation": "

The value of the property.
                                                                      " + } + }, + "updateType": { + "target": "com.amazonaws.iottwinmaker#PropertyUpdateType", + "traits": { + "smithy.api#documentation": "

The update type of the update property request.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that sets information about a property.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyRequests": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#PropertyRequest" + } + }, + "com.amazonaws.iottwinmaker#PropertyResponse": { + "type": "structure", + "members": { + "definition": { + "target": "com.amazonaws.iottwinmaker#PropertyDefinitionResponse", + "traits": { + "smithy.api#documentation": "

An object that specifies information about a property.
                                                                      " + } + }, + "value": { + "target": "com.amazonaws.iottwinmaker#DataValue", + "traits": { + "smithy.api#documentation": "

The value of the property.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains information about a property response.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyResponses": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#Name" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#PropertyResponse" + } + }, + "com.amazonaws.iottwinmaker#PropertyUpdateType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "UPDATE", + "name": "UPDATE" + }, + { + "value": "DELETE", + "name": "DELETE" + } + ] + } + }, + "com.amazonaws.iottwinmaker#PropertyValue": { + "type": "structure", + "members": { + "timestamp": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The timestamp of a value for a time series property.
                                                                      ", + "smithy.api#required": {} + } + }, + "value": { + "target": "com.amazonaws.iottwinmaker#DataValue", + "traits": { + "smithy.api#documentation": "

An object that specifies a value for a time series property.
                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains information about a value for a time series property.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyValueEntry": { + "type": "structure", + "members": { + "entityPropertyReference": { + "target": "com.amazonaws.iottwinmaker#EntityPropertyReference", + "traits": { + "smithy.api#documentation": "

An object that contains information about the entity that has the property.
                                                                      ", + "smithy.api#required": {} + } + }, + "propertyValues": { + "target": "com.amazonaws.iottwinmaker#PropertyValues", + "traits": { + "smithy.api#documentation": "

A list of objects that specify time series property values.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that specifies information about time series property values.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyValueHistory": { + "type": "structure", + "members": { + "entityPropertyReference": { + "target": "com.amazonaws.iottwinmaker#EntityPropertyReference", + "traits": { + "smithy.api#documentation": "

An object that uniquely identifies an entity property.
                                                                      ", + "smithy.api#required": {} + } + }, + "values": { + "target": "com.amazonaws.iottwinmaker#Values", + "traits": { + "smithy.api#documentation": "

A list of objects that contain information about the values in the history of a time series property.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The history of values for a time series property.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#PropertyValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#PropertyValueHistory" + } + }, + "com.amazonaws.iottwinmaker#PropertyValues": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#PropertyValue" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.iottwinmaker#Relationship": { + "type": "structure", + "members": { + "targetComponentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

The ID of the target component type associated with this relationship.
                                                                      " + } + }, + "relationshipType": { + "target": "com.amazonaws.iottwinmaker#String", + "traits": { + "smithy.api#documentation": "

The type of the relationship.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that specifies a relationship with another component type.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#RelationshipValue": { + "type": "structure", + "members": { + "targetEntityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

The ID of the target entity associated with this relationship value.
                                                                      " + } + }, + "targetComponentName": { + "target": "com.amazonaws.iottwinmaker#Name", + "traits": { + "smithy.api#documentation": "

The name of the target component associated with the relationship value.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

A value that associates a component and an entity.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#RequiredProperties": { + "type": "set", + "member": { + "target": "com.amazonaws.iottwinmaker#Name" + } + }, + "com.amazonaws.iottwinmaker#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The resource wasn't found.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.iottwinmaker#RoleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:((aws)|(aws-cn)|(aws-us-gov)):iam::[0-9]{12}:role/" + } + }, + "com.amazonaws.iottwinmaker#S3Location": { + "type": "string" + }, + "com.amazonaws.iottwinmaker#S3Url": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^[sS]3://[A-Za-z0-9._/-]+$" + } + }, + "com.amazonaws.iottwinmaker#SceneCapabilities": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#SceneCapability" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.iottwinmaker#SceneCapability": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.iottwinmaker#SceneSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#SceneSummary" + } + }, + "com.amazonaws.iottwinmaker#SceneSummary": { + "type": "structure", + "members": { + "sceneId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

The ID of the scene.
                                                                      ", + "smithy.api#required": {} + } + }, + "contentLocation": { + "target": "com.amazonaws.iottwinmaker#S3Url", + "traits": { + "smithy.api#documentation": "

The relative path that specifies the location of the content definition file.
                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the scene.
                                                                      ", + "smithy.api#required": {} + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time when the scene was created.
                                                                      ", + "smithy.api#required": {} + } + }, + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

The date and time when the scene was last updated.
                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

The scene description.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that contains information about a scene.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#Scope": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ENTITY", + "name": "ENTITY" + }, + { + "value": "WORKSPACE", + "name": "WORKSPACE" + } + ] + } + }, + "com.amazonaws.iottwinmaker#SelectedPropertyList": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#String" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 10 + } + } + }, + "com.amazonaws.iottwinmaker#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The service quota was exceeded.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.iottwinmaker#State": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATING", + "name": "CREATING" + }, + { + "value": "UPDATING", + "name": "UPDATING" + }, + { + "value": "DELETING", + "name": "DELETING" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "ERROR", + "name": "ERROR" + } + ] + } + }, + "com.amazonaws.iottwinmaker#Status": { + "type": "structure", + "members": { + "state": { + "target": "com.amazonaws.iottwinmaker#State", + "traits": { + "smithy.api#documentation": "

The current state of the entity, component, component type, or workspace.
                                                                      " + } + }, + "error": { + "target": "com.amazonaws.iottwinmaker#ErrorDetails", + "traits": { + "smithy.api#documentation": "

The error message.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

An object that represents the status of an entity, component, component type, or workspace.
                                                                      " + } + }, + "com.amazonaws.iottwinmaker#String": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.iottwinmaker#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, + "com.amazonaws.iottwinmaker#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.iottwinmaker#TagMap": { + "type": "map", + "key": { + "target": "com.amazonaws.iottwinmaker#TagKey" + }, + "value": { + "target": "com.amazonaws.iottwinmaker#TagValue" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.iottwinmaker#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#TooManyTagsException" + } + ], + "traits": { + "smithy.api#documentation": "

Adds tags to a resource.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/tags", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#TagResourceRequest": { + "type": "structure", + "members": { + "resourceARN": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the resource.
                                                                      ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.iottwinmaker#TagMap", + "traits": { + "smithy.api#documentation": "

Metadata to add to this resource.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#TagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.iottwinmaker#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.iottwinmaker#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The rate exceeds the limit.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.iottwinmaker#Timestamp": { + "type": "timestamp" + }, + "com.amazonaws.iottwinmaker#TooManyTagsException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

The number of tags exceeds the limit.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.iottwinmaker#TwinMakerArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:((aws)|(aws-cn)|(aws-us-gov)):iottwinmaker:[a-z0-9-]+:[0-9]{12}:[\\/a-zA-Z0-9_\\-\\.:]+$" + } + }, + "com.amazonaws.iottwinmaker#Type": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "RELATIONSHIP", + "name": "RELATIONSHIP" + }, + { + "value": "STRING", + "name": "STRING" + }, + { + "value": "LONG", + "name": "LONG" + }, + { + "value": "BOOLEAN", + "name": "BOOLEAN" + }, + { + "value": "INTEGER", + "name": "INTEGER" + }, + { + "value": "DOUBLE", + "name": "DOUBLE" + }, + { + "value": "LIST", + "name": "LIST" + }, + { + "value": "MAP", + "name": "MAP" + } + ] + } + }, + "com.amazonaws.iottwinmaker#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

Removes tags from a resource.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceARN": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the resource.
                                                                      ", + "smithy.api#httpQuery": "resourceARN", + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.iottwinmaker#TagKeyList", + "traits": { + "smithy.api#documentation": "

A list of tag key names to remove from the resource. You don't specify the value. Both the key and its associated value are removed.
                                                                      ", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.iottwinmaker#UpdateComponentType": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#UpdateComponentTypeRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#UpdateComponentTypeResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates information in a component type.
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "PUT", + "uri": "/workspaces/{workspaceId}/component-types/{componentTypeId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#UpdateComponentTypeRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

The ID of the workspace that contains the component type.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "isSingleton": { + "target": "com.amazonaws.iottwinmaker#Boolean", + "traits": { + "smithy.api#documentation": "

A Boolean value that specifies whether an entity can have more than one component of this\n type.
                                                                      " + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

The ID of the component type.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

The description of the component type.
                                                                      " + } + }, + "propertyDefinitions": { + "target": "com.amazonaws.iottwinmaker#PropertyDefinitionsRequest", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the property definitions in the component type. Each string \n in the mapping must be unique to this object.
                                                                      " + } + }, + "extendsFrom": { + "target": "com.amazonaws.iottwinmaker#ExtendsFrom", + "traits": { + "smithy.api#documentation": "

Specifies the component type that this component type extends.
                                                                      " + } + }, + "functions": { + "target": "com.amazonaws.iottwinmaker#FunctionsRequest", + "traits": { + "smithy.api#documentation": "

An object that maps strings to the functions in the component type. Each string \n in the mapping must be unique to this object.
                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#UpdateComponentTypeResponse": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

The ID of the workspace that contains the component type.
                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

The ARN of the component type.
                                                                      ", + "smithy.api#required": {} + } + }, + "componentTypeId": { + "target": "com.amazonaws.iottwinmaker#ComponentTypeId", + "traits": { + "smithy.api#documentation": "

The ID of the component type.
                                                                      ", + "smithy.api#required": {} + } + }, + "state": { + "target": "com.amazonaws.iottwinmaker#State", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the component type.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#UpdateEntity": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#UpdateEntityRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#UpdateEntityResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#ConflictException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates an entity.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "PUT", + "uri": "/workspaces/{workspaceId}/entities/{entityId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#UpdateEntityRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the entity.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entityId": { + "target": "com.amazonaws.iottwinmaker#EntityId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the entity.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "entityName": { + "target": "com.amazonaws.iottwinmaker#EntityName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the entity.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the entity.

                                                                      " + } + }, + "componentUpdates": { + "target": "com.amazonaws.iottwinmaker#ComponentUpdatesMapRequest", + "traits": { + "smithy.api#documentation": "

                                                                      An object that maps strings to the component updates in the request. Each string \n in the mapping must be unique to this object.

                                                                      " + } + }, + "parentEntityUpdate": { + "target": "com.amazonaws.iottwinmaker#ParentEntityUpdateRequest", + "traits": { + "smithy.api#documentation": "

                                                                      An object that describes the update request for a parent entity.
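For orientation, here is a minimal TypeScript sketch of issuing this update through a client generated from the model above. The package name @aws-sdk/client-iottwinmaker and the IoTTwinMakerClient / UpdateEntityCommand names are assumptions based on the `<Service>Client` / `<Operation>Command` convention the other clients in this change set follow; the workspace and entity IDs are placeholders.

```ts
import { IoTTwinMakerClient, UpdateEntityCommand } from "@aws-sdk/client-iottwinmaker"; // assumed package/exports

const client = new IoTTwinMakerClient({ region: "us-east-1" });

async function renameEntity(): Promise<void> {
  // workspaceId and entityId are httpLabel members, so they become part of the
  // PUT /workspaces/{workspaceId}/entities/{entityId} URI; the other members form the body.
  const response = await client.send(
    new UpdateEntityCommand({
      workspaceId: "ExampleWorkspace", // placeholder
      entityId: "entity-1234",         // placeholder
      entityName: "Mixer01",
      description: "Updated mixer entity",
    })
  );

  // UpdateEntityResponse (defined below) carries the update timestamp and the
  // state of the entity update.
  console.log(response.updateDateTime, response.state);
}
```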

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#UpdateEntityResponse": { + "type": "structure", + "members": { + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the entity was last updated.

                                                                      ", + "smithy.api#required": {} + } + }, + "state": { + "target": "com.amazonaws.iottwinmaker#State", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the entity update.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#UpdateScene": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#UpdateSceneRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#UpdateSceneResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates a scene.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "PUT", + "uri": "/workspaces/{workspaceId}/scenes/{sceneId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#UpdateSceneRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace that contains the scene.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "sceneId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the scene.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "contentLocation": { + "target": "com.amazonaws.iottwinmaker#S3Url", + "traits": { + "smithy.api#documentation": "

                                                                      The relative path that specifies the location of the content definition file.

                                                                      " + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of this scene.

                                                                      " + } + }, + "capabilities": { + "target": "com.amazonaws.iottwinmaker#SceneCapabilities", + "traits": { + "smithy.api#documentation": "

                                                                      A list of capabilities that the scene uses to render.

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#UpdateSceneResponse": { + "type": "structure", + "members": { + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the scene was last updated.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#UpdateWorkspace": { + "type": "operation", + "input": { + "target": "com.amazonaws.iottwinmaker#UpdateWorkspaceRequest" + }, + "output": { + "target": "com.amazonaws.iottwinmaker#UpdateWorkspaceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.iottwinmaker#AccessDeniedException" + }, + { + "target": "com.amazonaws.iottwinmaker#InternalServerException" + }, + { + "target": "com.amazonaws.iottwinmaker#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.iottwinmaker#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.iottwinmaker#ThrottlingException" + }, + { + "target": "com.amazonaws.iottwinmaker#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates a workspace.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "api." + }, + "smithy.api#http": { + "method": "PUT", + "uri": "/workspaces/{workspaceId}", + "code": 200 + } + } + }, + "com.amazonaws.iottwinmaker#UpdateWorkspaceRequest": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the workspace.

                                                                      " + } + }, + "role": { + "target": "com.amazonaws.iottwinmaker#RoleArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the execution role associated with the workspace.

                                                                      " + } + } + } + }, + "com.amazonaws.iottwinmaker#UpdateWorkspaceResponse": { + "type": "structure", + "members": { + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time of the current update.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.iottwinmaker#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.iottwinmaker#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request failed validation.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.iottwinmaker#Value": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^[a-zA-Z_\\-0-9]+$" + } + }, + "com.amazonaws.iottwinmaker#Values": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#PropertyValue" + } + }, + "com.amazonaws.iottwinmaker#WorkspaceSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.iottwinmaker#WorkspaceSummary" + } + }, + "com.amazonaws.iottwinmaker#WorkspaceSummary": { + "type": "structure", + "members": { + "workspaceId": { + "target": "com.amazonaws.iottwinmaker#Id", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the workspace.

                                                                      ", + "smithy.api#required": {} + } + }, + "arn": { + "target": "com.amazonaws.iottwinmaker#TwinMakerArn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the workspace.

                                                                      ", + "smithy.api#required": {} + } + }, + "description": { + "target": "com.amazonaws.iottwinmaker#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description of the workspace.

                                                                      " + } + }, + "creationDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the workspace was created.

                                                                      ", + "smithy.api#required": {} + } + }, + "updateDateTime": { + "target": "com.amazonaws.iottwinmaker#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time when the workspace was last updated.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that contains information about a workspace.

                                                                      " + } + } + } +} diff --git a/codegen/sdk-codegen/aws-models/kafka.json b/codegen/sdk-codegen/aws-models/kafka.json index cf41d27a1d33..bc42d95b96a8 100644 --- a/codegen/sdk-codegen/aws-models/kafka.json +++ b/codegen/sdk-codegen/aws-models/kafka.json @@ -476,6 +476,91 @@ } } }, + "com.amazonaws.kafka#Cluster": { + "type": "structure", + "members": { + "ActiveOperationArn": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that uniquely identifies a cluster operation.

                                                                      ", + "smithy.api#jsonName": "activeOperationArn" + } + }, + "ClusterType": { + "target": "com.amazonaws.kafka#ClusterType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the cluster.

                                                                      ", + "smithy.api#jsonName": "clusterType" + } + }, + "ClusterArn": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that uniquely identifies the cluster.

                                                                      ", + "smithy.api#jsonName": "clusterArn" + } + }, + "ClusterName": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the cluster.

                                                                      ", + "smithy.api#jsonName": "clusterName" + } + }, + "CreationTime": { + "target": "com.amazonaws.kafka#__timestampIso8601", + "traits": { + "smithy.api#documentation": "

                                                                      The time when the cluster was created.

                                                                      ", + "smithy.api#jsonName": "creationTime" + } + }, + "CurrentVersion": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The current version of the MSK cluster.

                                                                      ", + "smithy.api#jsonName": "currentVersion" + } + }, + "State": { + "target": "com.amazonaws.kafka#ClusterState", + "traits": { + "smithy.api#documentation": "

                                                                      The state of the cluster. The possible states are ACTIVE, CREATING, DELETING, FAILED, HEALING, MAINTENANCE, REBOOTING_BROKER, and UPDATING.

                                                                      ", + "smithy.api#jsonName": "state" + } + }, + "StateInfo": { + "target": "com.amazonaws.kafka#StateInfo", + "traits": { + "smithy.api#documentation": "

                                                                      State information for the Amazon MSK cluster.

                                                                      ", + "smithy.api#jsonName": "stateInfo" + } + }, + "Tags": { + "target": "com.amazonaws.kafka#__mapOf__string", + "traits": { + "smithy.api#documentation": "

                                                                      Tags attached to the cluster.

                                                                      ", + "smithy.api#jsonName": "tags" + } + }, + "Provisioned": { + "target": "com.amazonaws.kafka#Provisioned", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the provisioned cluster.

                                                                      ", + "smithy.api#jsonName": "provisioned" + } + }, + "Serverless": { + "target": "com.amazonaws.kafka#Serverless", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the serverless cluster.

                                                                      ", + "smithy.api#jsonName": "serverless" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Returns information about a cluster.

                                                                      " + } + }, "com.amazonaws.kafka#ClusterInfo": { "type": "structure", "members": { @@ -770,6 +855,22 @@ ] } }, + "com.amazonaws.kafka#ClusterType": { + "type": "string", + "traits": { + "smithy.api#documentation": "

                                                                      The type of cluster.

                                                                      ", + "smithy.api#enum": [ + { + "value": "PROVISIONED", + "name": "PROVISIONED" + }, + { + "value": "SERVERLESS", + "name": "SERVERLESS" + } + ] + } + }, "com.amazonaws.kafka#CompatibleKafkaVersion": { "type": "structure", "members": { @@ -1121,6 +1222,113 @@ } } }, + "com.amazonaws.kafka#CreateClusterV2": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafka#CreateClusterV2Request" + }, + "output": { + "target": "com.amazonaws.kafka#CreateClusterV2Response" + }, + "errors": [ + { + "target": "com.amazonaws.kafka#BadRequestException" + }, + { + "target": "com.amazonaws.kafka#ConflictException" + }, + { + "target": "com.amazonaws.kafka#ForbiddenException" + }, + { + "target": "com.amazonaws.kafka#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafka#ServiceUnavailableException" + }, + { + "target": "com.amazonaws.kafka#TooManyRequestsException" + }, + { + "target": "com.amazonaws.kafka#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a new MSK cluster.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/api/v2/clusters", + "code": 200 + } + } + }, + "com.amazonaws.kafka#CreateClusterV2Request": { + "type": "structure", + "members": { + "ClusterName": { + "target": "com.amazonaws.kafka#__stringMin1Max64", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the cluster.

                                                                      ", + "smithy.api#jsonName": "clusterName", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.kafka#__mapOf__string", + "traits": { + "smithy.api#documentation": "

                                                                      A map of tags that you want the cluster to have.

                                                                      ", + "smithy.api#jsonName": "tags" + } + }, + "Provisioned": { + "target": "com.amazonaws.kafka#ProvisionedRequest", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the provisioned cluster.

                                                                      ", + "smithy.api#jsonName": "provisioned" + } + }, + "Serverless": { + "target": "com.amazonaws.kafka#ServerlessRequest", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the serverless cluster.
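The new CreateClusterV2 operation takes either a Provisioned or a Serverless payload alongside the name and tags. As a minimal sketch of creating a serverless cluster with the v3 client: KafkaClient is the existing client class, the CreateClusterV2Command name is assumed from the `<Operation>Command` convention, the subnet and security-group IDs are placeholders, and the `Enabled` flag comes from the existing kafka#Iam shape.

```ts
import { KafkaClient, CreateClusterV2Command } from "@aws-sdk/client-kafka"; // command name assumed from the codegen convention

const client = new KafkaClient({ region: "us-east-1" });

async function createServerlessCluster(): Promise<void> {
  const { ClusterArn, ClusterType, State } = await client.send(
    new CreateClusterV2Command({
      ClusterName: "example-serverless-cluster",
      Tags: { team: "data-platform" },
      // ServerlessRequest, ServerlessClientAuthentication, and VpcConfig are the
      // shapes added later in this model; the IDs below are placeholders.
      Serverless: {
        VpcConfigs: [
          {
            SubnetIds: ["subnet-aaaa1111", "subnet-bbbb2222"],
            SecurityGroupIds: ["sg-cccc3333"],
          },
        ],
        ClientAuthentication: {
          Sasl: { Iam: { Enabled: true } }, // Enabled is the existing kafka#Iam member
        },
      },
    })
  );

  console.log(ClusterArn, ClusterType, State); // State starts as CREATING
}
```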

                                                                      ", + "smithy.api#jsonName": "serverless" + } + } + } + }, + "com.amazonaws.kafka#CreateClusterV2Response": { + "type": "structure", + "members": { + "ClusterArn": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the cluster.

                                                                      ", + "smithy.api#jsonName": "clusterArn" + } + }, + "ClusterName": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the MSK cluster.

                                                                      ", + "smithy.api#jsonName": "clusterName" + } + }, + "State": { + "target": "com.amazonaws.kafka#ClusterState", + "traits": { + "smithy.api#documentation": "

                                                                      The state of the cluster. The possible states are ACTIVE, CREATING, DELETING, FAILED, HEALING, MAINTENANCE, REBOOTING_BROKER, and UPDATING.

                                                                      ", + "smithy.api#jsonName": "state" + } + }, + "ClusterType": { + "target": "com.amazonaws.kafka#ClusterType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the cluster. The possible types are PROVISIONED and SERVERLESS.

                                                                      ", + "smithy.api#jsonName": "clusterType" + } + } + } + }, "com.amazonaws.kafka#CreateConfiguration": { "type": "operation", "input": { @@ -1487,6 +1695,65 @@ } } }, + "com.amazonaws.kafka#DescribeClusterV2": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafka#DescribeClusterV2Request" + }, + "output": { + "target": "com.amazonaws.kafka#DescribeClusterV2Response" + }, + "errors": [ + { + "target": "com.amazonaws.kafka#BadRequestException" + }, + { + "target": "com.amazonaws.kafka#ForbiddenException" + }, + { + "target": "com.amazonaws.kafka#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafka#NotFoundException" + }, + { + "target": "com.amazonaws.kafka#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns a description of the MSK cluster whose Amazon Resource Name (ARN) is specified in the request.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/api/v2/clusters/{ClusterArn}", + "code": 200 + } + } + }, + "com.amazonaws.kafka#DescribeClusterV2Request": { + "type": "structure", + "members": { + "ClusterArn": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that uniquely identifies the cluster.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kafka#DescribeClusterV2Response": { + "type": "structure", + "members": { + "ClusterInfo": { + "target": "com.amazonaws.kafka#Cluster", + "traits": { + "smithy.api#documentation": "

                                                                      The cluster information.

                                                                      ", + "smithy.api#jsonName": "clusterInfo" + } + } + } + }, "com.amazonaws.kafka#DescribeConfiguration": { "type": "operation", "input": { @@ -2116,6 +2383,9 @@ { "target": "com.amazonaws.kafka#CreateCluster" }, + { + "target": "com.amazonaws.kafka#CreateClusterV2" + }, { "target": "com.amazonaws.kafka#CreateConfiguration" }, @@ -2131,6 +2401,9 @@ { "target": "com.amazonaws.kafka#DescribeClusterOperation" }, + { + "target": "com.amazonaws.kafka#DescribeClusterV2" + }, { "target": "com.amazonaws.kafka#DescribeConfiguration" }, @@ -2149,6 +2422,9 @@ { "target": "com.amazonaws.kafka#ListClusters" }, + { + "target": "com.amazonaws.kafka#ListClustersV2" + }, { "target": "com.amazonaws.kafka#ListConfigurationRevisions" }, @@ -2277,12 +2553,94 @@ "com.amazonaws.kafka#ListClusterOperationsRequest": { "type": "structure", "members": { - "ClusterArn": { + "ClusterArn": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that uniquely identifies the cluster.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "MaxResults": { + "target": "com.amazonaws.kafka#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return in the response. If there are more results, the response includes a NextToken parameter.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + }, + "NextToken": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The paginated results marker. When the result of the operation is truncated, the call returns NextToken in the response. \n To get the next batch, provide this token in your next request.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.kafka#ListClusterOperationsResponse": { + "type": "structure", + "members": { + "ClusterOperationInfoList": { + "target": "com.amazonaws.kafka#__listOfClusterOperationInfo", + "traits": { + "smithy.api#documentation": "

                                                                      An array of cluster operation information objects.

                                                                      ", + "smithy.api#jsonName": "clusterOperationInfoList" + } + }, + "NextToken": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      If the response of ListClusterOperations is truncated, it returns a NextToken in the response. Send this NextToken in a subsequent request to ListClusterOperations.

                                                                      ", + "smithy.api#jsonName": "nextToken" + } + } + } + }, + "com.amazonaws.kafka#ListClusters": { + "type": "operation", + "input": { + "target": "com.amazonaws.kafka#ListClustersRequest" + }, + "output": { + "target": "com.amazonaws.kafka#ListClustersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.kafka#BadRequestException" + }, + { + "target": "com.amazonaws.kafka#ForbiddenException" + }, + { + "target": "com.amazonaws.kafka#InternalServerErrorException" + }, + { + "target": "com.amazonaws.kafka#UnauthorizedException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns a list of all the MSK clusters in the current Region.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/v1/clusters", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "ClusterInfoList", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.kafka#ListClustersRequest": { + "type": "structure", + "members": { + "ClusterNameFilter": { "target": "com.amazonaws.kafka#__string", "traits": { - "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) that uniquely identifies the cluster.

                                                                      ", - "smithy.api#httpLabel": {}, - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      Specify a prefix of the names of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.

                                                                      ", + "smithy.api#httpQuery": "clusterNameFilter" } }, "MaxResults": { @@ -2301,32 +2659,32 @@ } } }, - "com.amazonaws.kafka#ListClusterOperationsResponse": { + "com.amazonaws.kafka#ListClustersResponse": { "type": "structure", "members": { - "ClusterOperationInfoList": { - "target": "com.amazonaws.kafka#__listOfClusterOperationInfo", + "ClusterInfoList": { + "target": "com.amazonaws.kafka#__listOfClusterInfo", "traits": { - "smithy.api#documentation": "

                                                                      An array of cluster operation information objects.

                                                                      ", - "smithy.api#jsonName": "clusterOperationInfoList" + "smithy.api#documentation": "

                                                                      Information on each of the MSK clusters in the response.

                                                                      ", + "smithy.api#jsonName": "clusterInfoList" } }, "NextToken": { "target": "com.amazonaws.kafka#__string", "traits": { - "smithy.api#documentation": "

                                                                      If the response of ListClusterOperations is truncated, it returns a NextToken in the response. Send this NextToken in a subsequent request to ListClusterOperations.

                                                                      ", + "smithy.api#documentation": "

                                                                      The paginated results marker. When the result of a ListClusters operation is truncated, the call returns NextToken in the response. \n To get another batch of clusters, provide this token in your next request.

                                                                      ", "smithy.api#jsonName": "nextToken" } } } }, - "com.amazonaws.kafka#ListClusters": { + "com.amazonaws.kafka#ListClustersV2": { "type": "operation", "input": { - "target": "com.amazonaws.kafka#ListClustersRequest" + "target": "com.amazonaws.kafka#ListClustersV2Request" }, "output": { - "target": "com.amazonaws.kafka#ListClustersResponse" + "target": "com.amazonaws.kafka#ListClustersV2Response" }, "errors": [ { @@ -2346,7 +2704,7 @@ "smithy.api#documentation": "

                                                                      Returns a list of all the MSK clusters in the current Region.

                                                                      ", "smithy.api#http": { "method": "GET", - "uri": "/v1/clusters", + "uri": "/api/v2/clusters", "code": 200 }, "smithy.api#paginated": { @@ -2357,16 +2715,23 @@ } } }, - "com.amazonaws.kafka#ListClustersRequest": { + "com.amazonaws.kafka#ListClustersV2Request": { "type": "structure", "members": { "ClusterNameFilter": { "target": "com.amazonaws.kafka#__string", "traits": { - "smithy.api#documentation": "

                                                                      Specify a prefix of the name of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.

                                                                      ", + "smithy.api#documentation": "

                                                                      Specify a prefix of the names of the clusters that you want to list. The service lists all the clusters whose names start with this prefix.

                                                                      ", "smithy.api#httpQuery": "clusterNameFilter" } }, + "ClusterTypeFilter": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The cluster type to filter the results by. Specify either PROVISIONED or SERVERLESS.

                                                                      ", + "smithy.api#httpQuery": "clusterTypeFilter" + } + }, "MaxResults": { "target": "com.amazonaws.kafka#MaxResults", "traits": { @@ -2383,11 +2748,11 @@ } } }, - "com.amazonaws.kafka#ListClustersResponse": { + "com.amazonaws.kafka#ListClustersV2Response": { "type": "structure", "members": { "ClusterInfoList": { - "target": "com.amazonaws.kafka#__listOfClusterInfo", + "target": "com.amazonaws.kafka#__listOfCluster", "traits": { "smithy.api#documentation": "

                                                                      Information on each of the MSK clusters in the response.

                                                                      ", "smithy.api#jsonName": "clusterInfoList" @@ -3170,6 +3535,160 @@ "smithy.api#documentation": "

                                                                      Prometheus settings.

                                                                      " } }, + "com.amazonaws.kafka#Provisioned": { + "type": "structure", + "members": { + "BrokerNodeGroupInfo": { + "target": "com.amazonaws.kafka#BrokerNodeGroupInfo", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the brokers.

                                                                      ", + "smithy.api#jsonName": "brokerNodeGroupInfo", + "smithy.api#required": {} + } + }, + "CurrentBrokerSoftwareInfo": { + "target": "com.amazonaws.kafka#BrokerSoftwareInfo", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the Apache Kafka version deployed on the brokers.

                                                                      ", + "smithy.api#jsonName": "currentBrokerSoftwareInfo" + } + }, + "ClientAuthentication": { + "target": "com.amazonaws.kafka#ClientAuthentication", + "traits": { + "smithy.api#documentation": "

                                                                      Includes all client authentication information.

                                                                      ", + "smithy.api#jsonName": "clientAuthentication" + } + }, + "EncryptionInfo": { + "target": "com.amazonaws.kafka#EncryptionInfo", + "traits": { + "smithy.api#documentation": "

                                                                      Includes all encryption-related information.

                                                                      ", + "smithy.api#jsonName": "encryptionInfo" + } + }, + "EnhancedMonitoring": { + "target": "com.amazonaws.kafka#EnhancedMonitoring", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the level of monitoring for the MSK cluster. The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.

                                                                      ", + "smithy.api#jsonName": "enhancedMonitoring" + } + }, + "OpenMonitoring": { + "target": "com.amazonaws.kafka#OpenMonitoringInfo", + "traits": { + "smithy.api#documentation": "

                                                                      The settings for open monitoring.

                                                                      ", + "smithy.api#jsonName": "openMonitoring" + } + }, + "LoggingInfo": { + "target": "com.amazonaws.kafka#LoggingInfo", + "traits": { + "smithy.api#documentation": "

                                                                      Log delivery information for the cluster.

                                                                      ", + "smithy.api#jsonName": "loggingInfo" + } + }, + "NumberOfBrokerNodes": { + "target": "com.amazonaws.kafka#__integerMin1Max15", + "traits": { + "smithy.api#documentation": "

                                                                      The number of broker nodes in the cluster.

                                                                      ", + "smithy.api#jsonName": "numberOfBrokerNodes", + "smithy.api#required": {} + } + }, + "ZookeeperConnectString": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The connection string to use to connect to the Apache ZooKeeper cluster.

                                                                      ", + "smithy.api#jsonName": "zookeeperConnectString" + } + }, + "ZookeeperConnectStringTls": { + "target": "com.amazonaws.kafka#__string", + "traits": { + "smithy.api#documentation": "

                                                                      The connection string to use to connect to the Apache ZooKeeper cluster on a TLS port.

                                                                      ", + "smithy.api#jsonName": "zookeeperConnectStringTls" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Provisioned cluster.

                                                                      " + } + }, + "com.amazonaws.kafka#ProvisionedRequest": { + "type": "structure", + "members": { + "BrokerNodeGroupInfo": { + "target": "com.amazonaws.kafka#BrokerNodeGroupInfo", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the brokers.

                                                                      ", + "smithy.api#jsonName": "brokerNodeGroupInfo", + "smithy.api#required": {} + } + }, + "ClientAuthentication": { + "target": "com.amazonaws.kafka#ClientAuthentication", + "traits": { + "smithy.api#documentation": "

                                                                      Includes all client authentication information.

                                                                      ", + "smithy.api#jsonName": "clientAuthentication" + } + }, + "ConfigurationInfo": { + "target": "com.amazonaws.kafka#ConfigurationInfo", + "traits": { + "smithy.api#documentation": "

                                                                      Represents the configuration that you want Amazon MSK to use for the brokers in a cluster.

                                                                      ", + "smithy.api#jsonName": "configurationInfo" + } + }, + "EncryptionInfo": { + "target": "com.amazonaws.kafka#EncryptionInfo", + "traits": { + "smithy.api#documentation": "

                                                                      Includes all encryption-related information.

                                                                      ", + "smithy.api#jsonName": "encryptionInfo" + } + }, + "EnhancedMonitoring": { + "target": "com.amazonaws.kafka#EnhancedMonitoring", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the level of monitoring for the MSK cluster. The possible values are DEFAULT, PER_BROKER, PER_TOPIC_PER_BROKER, and PER_TOPIC_PER_PARTITION.

                                                                      ", + "smithy.api#jsonName": "enhancedMonitoring" + } + }, + "OpenMonitoring": { + "target": "com.amazonaws.kafka#OpenMonitoringInfo", + "traits": { + "smithy.api#documentation": "

                                                                      The settings for open monitoring.

                                                                      ", + "smithy.api#jsonName": "openMonitoring" + } + }, + "KafkaVersion": { + "target": "com.amazonaws.kafka#__stringMin1Max128", + "traits": { + "smithy.api#documentation": "

                                                                      The Apache Kafka version that you want for the cluster.

                                                                      ", + "smithy.api#jsonName": "kafkaVersion", + "smithy.api#required": {} + } + }, + "LoggingInfo": { + "target": "com.amazonaws.kafka#LoggingInfo", + "traits": { + "smithy.api#documentation": "

                                                                      Log delivery information for the cluster.

                                                                      ", + "smithy.api#jsonName": "loggingInfo" + } + }, + "NumberOfBrokerNodes": { + "target": "com.amazonaws.kafka#__integerMin1Max15", + "traits": { + "smithy.api#documentation": "

                                                                      The number of broker nodes in the cluster.

                                                                      ", + "smithy.api#jsonName": "numberOfBrokerNodes", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Provisioned cluster request.

                                                                      " + } + }, "com.amazonaws.kafka#PublicAccess": { "type": "structure", "members": { @@ -3329,6 +3848,82 @@ "smithy.api#documentation": "

                                                                      Details for SASL/SCRAM client authentication.

                                                                      " } }, + "com.amazonaws.kafka#Serverless": { + "type": "structure", + "members": { + "VpcConfigs": { + "target": "com.amazonaws.kafka#__listOfVpcConfig", + "traits": { + "smithy.api#documentation": "

                                                                      The configuration of the Amazon VPCs for the cluster.

                                                                      ", + "smithy.api#jsonName": "vpcConfigs", + "smithy.api#required": {} + } + }, + "ClientAuthentication": { + "target": "com.amazonaws.kafka#ServerlessClientAuthentication", + "traits": { + "smithy.api#documentation": "

                                                                      Includes all client authentication information.

                                                                      ", + "smithy.api#jsonName": "clientAuthentication" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Serverless cluster.

                                                                      " + } + }, + "com.amazonaws.kafka#ServerlessClientAuthentication": { + "type": "structure", + "members": { + "Sasl": { + "target": "com.amazonaws.kafka#ServerlessSasl", + "traits": { + "smithy.api#documentation": "

                                                                      Details for ClientAuthentication using SASL.

                                                                      ", + "smithy.api#jsonName": "sasl" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Includes all client authentication information.

                                                                      " + } + }, + "com.amazonaws.kafka#ServerlessRequest": { + "type": "structure", + "members": { + "VpcConfigs": { + "target": "com.amazonaws.kafka#__listOfVpcConfig", + "traits": { + "smithy.api#documentation": "

                                                                      The configuration of the Amazon VPCs for the cluster.

                                                                      ", + "smithy.api#jsonName": "vpcConfigs", + "smithy.api#required": {} + } + }, + "ClientAuthentication": { + "target": "com.amazonaws.kafka#ServerlessClientAuthentication", + "traits": { + "smithy.api#documentation": "

                                                                      Includes all client authentication information.

                                                                      ", + "smithy.api#jsonName": "clientAuthentication" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Serverless cluster request.

                                                                      " + } + }, + "com.amazonaws.kafka#ServerlessSasl": { + "type": "structure", + "members": { + "Iam": { + "target": "com.amazonaws.kafka#Iam", + "traits": { + "smithy.api#documentation": "

                                                                      Indicates whether IAM access control is enabled.

                                                                      ", + "smithy.api#jsonName": "iam" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Details for client authentication using SASL.

                                                                      " + } + }, "com.amazonaws.kafka#ServiceUnavailableException": { "type": "structure", "members": { @@ -4383,6 +4978,29 @@ } } }, + "com.amazonaws.kafka#VpcConfig": { + "type": "structure", + "members": { + "SubnetIds": { + "target": "com.amazonaws.kafka#__listOf__string", + "traits": { + "smithy.api#documentation": "

                                                                      The IDs of the subnets associated with the cluster.

                                                                      ", + "smithy.api#jsonName": "subnetIds", + "smithy.api#required": {} + } + }, + "SecurityGroupIds": { + "target": "com.amazonaws.kafka#__listOf__string", + "traits": { + "smithy.api#documentation": "

                                                                      The IDs of the security groups associated with the cluster.

                                                                      ", + "smithy.api#jsonName": "securityGroupIds" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The configuration of the Amazon VPCs for the cluster.

                                                                      " + } + }, "com.amazonaws.kafka#ZookeeperNodeInfo": { "type": "structure", "members": { @@ -4462,6 +5080,12 @@ "target": "com.amazonaws.kafka#BrokerEBSVolumeInfo" } }, + "com.amazonaws.kafka#__listOfCluster": { + "type": "list", + "member": { + "target": "com.amazonaws.kafka#Cluster" + } + }, "com.amazonaws.kafka#__listOfClusterInfo": { "type": "list", "member": { @@ -4516,6 +5140,12 @@ "target": "com.amazonaws.kafka#UnprocessedScramSecret" } }, + "com.amazonaws.kafka#__listOfVpcConfig": { + "type": "list", + "member": { + "target": "com.amazonaws.kafka#VpcConfig" + } + }, "com.amazonaws.kafka#__listOf__string": { "type": "list", "member": { diff --git a/codegen/sdk-codegen/aws-models/kinesis.json b/codegen/sdk-codegen/aws-models/kinesis.json index e7b03588dd3a..e18eec9a8b9a 100644 --- a/codegen/sdk-codegen/aws-models/kinesis.json +++ b/codegen/sdk-codegen/aws-models/kinesis.json @@ -49,7 +49,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Adds or updates tags for the specified Kinesis data stream. Each time you invoke\n this operation, you can specify up to 10 tags. If you want to add more than 10 tags to\n your stream, you can invoke this operation multiple times. In total, each stream can\n have up to 50 tags.

                                                                      \n

                                                                      If tags have already been assigned to the stream, AddTagsToStream\n overwrites any existing tags that correspond to the specified tag keys.

                                                                      \n

                                                                      \n AddTagsToStream has a limit of five transactions per second per\n account.

                                                                      " + "smithy.api#documentation": "

                                                                      Adds or updates tags for the specified Kinesis data stream. You can assign up to 50\n tags to a data stream.

                                                                      \n

                                                                      If tags have already been assigned to the stream, AddTagsToStream\n overwrites any existing tags that correspond to the specified tag keys.

                                                                      \n

                                                                      \n AddTagsToStream has a limit of five transactions per second per\n account.
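As a usage sketch against the existing @aws-sdk/client-kinesis surface (KinesisClient and AddTagsToStreamCommand are the generated names; the stream name and tags are placeholders):

```ts
import { KinesisClient, AddTagsToStreamCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-east-1" });

async function tagStream(): Promise<void> {
  // Tags whose keys already exist on the stream are overwritten, so re-running
  // this call with a changed value updates the tag in place.
  await client.send(
    new AddTagsToStreamCommand({
      StreamName: "example-stream",                      // placeholder
      Tags: { environment: "prod", owner: "data-team" }, // up to 50 tags per stream
    })
  );
}
```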

                                                                      " } }, "com.amazonaws.kinesis#AddTagsToStreamInput": { @@ -86,12 +86,14 @@ "ShardId": { "target": "com.amazonaws.kinesis#ShardId", "traits": { + "smithy.api#documentation": "

                                                                      The shard ID of the existing child shard of the current shard.

                                                                      ", "smithy.api#required": {} } }, "ParentShards": { "target": "com.amazonaws.kinesis#ShardIdList", "traits": { + "smithy.api#documentation": "

                                                                      The current shard that is the parent of the existing child shard.

                                                                      ", "smithy.api#required": {} } }, @@ -101,6 +103,9 @@ "smithy.api#required": {} } } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Output parameter of the GetRecords API. The existing child shard of the current\n shard.

                                                                      " } }, "com.amazonaws.kinesis#ChildShardList": { @@ -152,7 +157,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "^(arn):aws.*:kinesis:.*:\\d{12}:.*stream\\/[a-zA-Z0-9_.-]+\\/consumer\\/[a-zA-Z0-9_.-]+:[0-9]+" + "smithy.api#pattern": "^(arn):aws.*:kinesis:.*:\\d{12}:.*stream\\/[a-zA-Z0-9_.-]+\\/consumer\\/[a-zA-Z0-9_.-]+:[0-9]+$" } }, "com.amazonaws.kinesis#ConsumerCountObject": { @@ -221,7 +226,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "[a-zA-Z0-9_.-]+" + "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" } }, "com.amazonaws.kinesis#ConsumerStatus": { @@ -260,7 +265,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Creates a Kinesis data stream. A stream captures and transports data records that\n are continuously emitted from different data sources or producers.\n Scale-out within a stream is explicitly supported by means of shards, which are uniquely\n identified groups of data records in a stream.

                                                                      \n

                                                                      You specify and control the number of shards that a stream is composed of. Each\n shard can support reads up to five transactions per second, up to a maximum data read\n total of 2 MiB per second. Each shard can support writes up to 1,000 records per second,\n up to a maximum data write total of 1 MiB per second. If the amount of data input\n increases or decreases, you can add or remove shards.

                                                                      \n

                                                                      The stream name identifies the stream. The name is scoped to the AWS account used\n by the application. It is also scoped by AWS Region. That is, two streams in two\n different accounts can have the same name, and two streams in the same account, but in\n two different Regions, can have the same name.

                                                                      \n

                                                                      \n CreateStream is an asynchronous operation. Upon receiving a\n CreateStream request, Kinesis Data Streams immediately returns and sets\n the stream status to CREATING. After the stream is created, Kinesis Data\n Streams sets the stream status to ACTIVE. You should perform read and write\n operations only on an ACTIVE stream.

                                                                      \n

                                                                      You receive a LimitExceededException when making a\n CreateStream request when you try to do one of the following:

                                                                      \n
                                                                        \n
                                                                      • Have more than five streams in the CREATING state at any point\n in time.
                                                                      • Create more shards than are authorized for your account.
                                                                      \n

                                                                      For the default shard limit for an AWS account, see Amazon Kinesis Data Streams\n Limits in the Amazon Kinesis Data Streams Developer\n Guide. To increase this limit, contact AWS\n Support.

                                                                      \n

                                                                      You can use DescribeStream to check the stream status, which is\n returned in StreamStatus.

                                                                      \n

                                                                      \n CreateStream has a limit of five transactions per second per\n account.

                                                                      " + "smithy.api#documentation": "

Creates a Kinesis data stream. A stream captures and transports data records that are continuously emitted from different data sources or producers. Scale-out within a stream is explicitly supported by means of shards, which are uniquely identified groups of data records in a stream.

You specify and control the number of shards that a stream is composed of. Each shard can support reads up to five transactions per second, up to a maximum data read total of 2 MiB per second. Each shard can support writes up to 1,000 records per second, up to a maximum data write total of 1 MiB per second. If the amount of data input increases or decreases, you can add or remove shards.

The stream name identifies the stream. The name is scoped to the Amazon Web Services account used by the application. It is also scoped by Amazon Web Services Region. That is, two streams in two different accounts can have the same name, and two streams in the same account, but in two different Regions, can have the same name.

CreateStream is an asynchronous operation. Upon receiving a CreateStream request, Kinesis Data Streams immediately returns and sets the stream status to CREATING. After the stream is created, Kinesis Data Streams sets the stream status to ACTIVE. You should perform read and write operations only on an ACTIVE stream.

You receive a LimitExceededException when making a CreateStream request when you try to do one of the following:
• Have more than five streams in the CREATING state at any point in time.
• Create more shards than are authorized for your account.

For the default shard limit for an Amazon Web Services account, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide. To increase this limit, contact Amazon Web Services Support.

You can use DescribeStreamSummary to check the stream status, which is returned in StreamStatus.

CreateStream has a limit of five transactions per second per account.
"
      } },
    "com.amazonaws.kinesis#CreateStreamInput": {
@@ -269,15 +274,20 @@
      "StreamName": {
        "target": "com.amazonaws.kinesis#StreamName",
        "traits": {
-          "smithy.api#documentation": "

A name to identify the stream. The stream name is scoped to the AWS account used by the application that creates the stream. It is also scoped by AWS Region. That is, two streams in two different AWS accounts can have the same name. Two streams in the same AWS account but in two different Regions can also have the same name.
",
+          "smithy.api#documentation": "
A name to identify the stream. The stream name is scoped to the Amazon Web Services account used by the application that creates the stream. It is also scoped by Amazon Web Services Region. That is, two streams in two different Amazon Web Services accounts can have the same name. Two streams in the same Amazon Web Services account but in two different Regions can also have the same name.
",
          "smithy.api#required": {}
      } },
      "ShardCount": {
        "target": "com.amazonaws.kinesis#PositiveIntegerObject",
        "traits": {
-          "smithy.api#documentation": "
The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput.
",
-          "smithy.api#required": {}
+          "smithy.api#documentation": "
The number of shards that the stream will use. The throughput of the stream is a function of the number of shards; more shards are required for greater provisioned throughput.
"
+        }
+      },
+      "StreamModeDetails": {
+        "target": "com.amazonaws.kinesis#StreamModeDetails",
+        "traits": {
+          "smithy.api#documentation": "
Indicates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.
"
      } } },

@@ -314,7 +324,7 @@
      } ],
      "traits": {
-        "smithy.api#documentation": "
Decreases the Kinesis data stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream's retention period is 24 hours.

This operation may result in lost data. For example, if the stream's retention period is 48 hours and is decreased to 24 hours, any data already in the stream that is older than 24 hours is inaccessible.
"
+        "smithy.api#documentation": "
Decreases the Kinesis data stream's retention period, which is the length of time data records are accessible after they are added to the stream. The minimum value of a stream's retention period is 24 hours.

This operation may result in lost data. For example, if the stream's retention period is 48 hours and is decreased to 24 hours, any data already in the stream that is older than 24 hours is inaccessible.
"
      } },
    "com.amazonaws.kinesis#DecreaseStreamRetentionPeriodInput": {

@@ -356,7 +366,7 @@
      } ],
      "traits": {
-        "smithy.api#documentation": "
Deletes a Kinesis data stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it receives the exception ResourceNotFoundException.

If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Kinesis Data Streams completes the deletion.

Note: Kinesis Data Streams might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete.

When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream.

You can use the DescribeStream operation to check the state of the stream, which is returned in StreamStatus.

DeleteStream has a limit of five transactions per second per account.
"
+        "smithy.api#documentation": "
Deletes a Kinesis data stream and all its shards and data. You must shut down any applications that are operating on the stream before you delete the stream. If an application attempts to operate on a deleted stream, it receives the exception ResourceNotFoundException.

If the stream is in the ACTIVE state, you can delete it. After a DeleteStream request, the specified stream is in the DELETING state until Kinesis Data Streams completes the deletion.

Note: Kinesis Data Streams might continue to accept data read and write operations, such as PutRecord, PutRecords, and GetRecords, on a stream in the DELETING state until the stream deletion is complete.

When you delete a stream, any shards in that stream are also deleted, and any tags are dissociated from the stream.

You can use the DescribeStreamSummary operation to check the state of the stream, which is returned in StreamStatus.

DeleteStream has a limit of five transactions per second per account.
"
      } },
    "com.amazonaws.kinesis#DeleteStreamInput": {

@@ -406,7 +416,7 @@
      "StreamARN": {
        "target": "com.amazonaws.kinesis#StreamARN",
        "traits": {
-          "smithy.api#documentation": "
The ARN of the Kinesis data stream that the consumer is registered with. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
"
+          "smithy.api#documentation": "
The ARN of the Kinesis data stream that the consumer is registered with. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
"
      } },
      "ConsumerName": {
@@ -460,6 +470,20 @@
          "smithy.api#documentation": "
The number of open shards.
",
          "smithy.api#required": {}
        }
+      },
+      "OnDemandStreamCount": {
+        "target": "com.amazonaws.kinesis#OnDemandStreamCountObject",
+        "traits": {
+          "smithy.api#documentation": "
Indicates the number of data streams with the on-demand capacity mode.
",
+          "smithy.api#required": {}
+        }
+      },
+      "OnDemandStreamCountLimit": {
+        "target": "com.amazonaws.kinesis#OnDemandStreamCountLimitObject",
+        "traits": {
+          "smithy.api#documentation": "
The maximum number of data streams with the on-demand capacity mode.
",
+          "smithy.api#required": {}
+        }
+      } } } },

@@ -480,7 +504,7 @@
      } ],
      "traits": {
-        "smithy.api#documentation": "
Describes the specified Kinesis data stream.

The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there is the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played in a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream.

You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Data Streams Developer Guide.

There are no guarantees about the chronological order shards returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard.

This operation has a limit of 10 transactions per second per account.
",
+        "smithy.api#documentation": "
Describes the specified Kinesis data stream.

This API has been revised. It's highly recommended that you use the DescribeStreamSummary API to get a summarized description of the specified Kinesis data stream and the ListShards API to list the shards in a specified data stream and obtain information about each shard.

The information returned includes the stream name, Amazon Resource Name (ARN), creation time, enhanced metric configuration, and shard map. The shard map is an array of shard objects. For each shard object, there is the hash key and sequence number ranges that the shard spans, and the IDs of any earlier shards that played in a role in creating the shard. Every record ingested in the stream is identified by a sequence number, which is assigned when the record is put into the stream.

You can limit the number of shards returned by each call. For more information, see Retrieving Shards from a Stream in the Amazon Kinesis Data Streams Developer Guide.

There are no guarantees about the chronological order shards returned. To process shards in chronological order, use the ID of the parent shard to track the lineage to the oldest shard.

This operation has a limit of 10 transactions per second per account.
",

        "smithy.waiters#waitable": {
          "StreamExists": {
            "acceptors": [
@@ -540,7 +564,7 @@
      "StreamARN": {
        "target": "com.amazonaws.kinesis#StreamARN",
        "traits": {
-          "smithy.api#documentation": "
The ARN of the Kinesis data stream that the consumer is registered with. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.
"
+          "smithy.api#documentation": "
The ARN of the Kinesis data stream that the consumer is registered with. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service Namespaces.
"
      } },
      "ConsumerName": {
@@ -582,13 +606,13 @@
      "Limit": {
        "target": "com.amazonaws.kinesis#DescribeStreamInputLimit",
        "traits": {
-          "smithy.api#documentation": "
The maximum number of shards to return in a single call. The default value is 100. If you specify a value greater than 100, at most 100 shards are returned.
"
+          "smithy.api#documentation": "
The maximum number of shards to return in a single call. The default value is 100. If you specify a value greater than 100, at most 100 results are returned.
"
      } },
      "ExclusiveStartShardId": {
        "target": "com.amazonaws.kinesis#ShardId",
        "traits": {
-          "smithy.api#documentation": "
The shard ID of the shard to start with.
"
+          "smithy.api#documentation": "
The shard ID of the shard to start with.

Specify this parameter to indicate that you want to describe the stream starting with the shard whose ID immediately follows ExclusiveStartShardId.

If you don't specify this parameter, the default behavior for DescribeStream is to describe the stream starting with the first shard in the stream.
"
      } } },

@@ -612,7 +636,7 @@
      "StreamDescription": {
        "target": "com.amazonaws.kinesis#StreamDescription",
        "traits": {
-          "smithy.api#documentation": "
The current status of the stream, the stream Amazon Resource Name (ARN), an array of shard objects that comprise the stream, and whether there are more shards available.
",
+          "smithy.api#documentation": "
The current status of the stream, the stream Amazon Resource Name (ARN), an array of shard objects that comprise the stream, and whether there are more shards available.
",
          "smithy.api#required": {}
      } }
@@ -638,7 +662,7 @@
      } ],
      "traits": {
-        "smithy.api#documentation": "
Provides a summarized description of the specified Kinesis data stream without the shard list.

The information returned includes the stream name, Amazon Resource Name (ARN), status, record retention period, approximate creation time, monitoring, encryption details, and open shard count.

DescribeStreamSummary has a limit of 20 transactions per second per account.
"
+        "smithy.api#documentation": "
Provides a summarized description of the specified Kinesis data stream without the shard list.

The information returned includes the stream name, Amazon Resource Name (ARN), status, record retention period, approximate creation time, monitoring, encryption details, and open shard count.

DescribeStreamSummary has a limit of 20 transactions per second per account.
"
      } },
    "com.amazonaws.kinesis#DescribeStreamSummaryInput": {
@@ -697,14 +721,14 @@
      "StreamName": {
        "target": "com.amazonaws.kinesis#StreamName",
        "traits": {
-          "smithy.api#documentation": "

The name of the Kinesis data stream for which to disable enhanced monitoring.
",
+          "smithy.api#documentation": "
The name of the Kinesis data stream for which to disable enhanced monitoring.
",
          "smithy.api#required": {}
      } },
      "ShardLevelMetrics": {
        "target": "com.amazonaws.kinesis#MetricsNameList",
        "traits": {
-          "smithy.api#documentation": "
List of shard-level metrics to disable.

The following are the valid shard-level metrics. The value \"ALL\" disables every metric.
• IncomingBytes
• IncomingRecords
• OutgoingBytes
• OutgoingRecords
• WriteProvisionedThroughputExceeded
• ReadProvisionedThroughputExceeded
• IteratorAgeMilliseconds
• ALL

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.
",
+          "smithy.api#documentation": "
List of shard-level metrics to disable.

The following are the valid shard-level metrics. The value \"ALL\" disables every metric.
• IncomingBytes
• IncomingRecords
• OutgoingBytes
• OutgoingRecords
• WriteProvisionedThroughputExceeded
• ReadProvisionedThroughputExceeded
• IteratorAgeMilliseconds
• ALL

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.
",
          "smithy.api#required": {}
      } }
@@ -752,7 +776,7 @@
      "ShardLevelMetrics": {
        "target": "com.amazonaws.kinesis#MetricsNameList",
        "traits": {
-          "smithy.api#documentation": "

List of shard-level metrics to enable.

The following are the valid shard-level metrics. The value \"ALL\" enables every metric.
• IncomingBytes
• IncomingRecords
• OutgoingBytes
• OutgoingRecords
• WriteProvisionedThroughputExceeded
• ReadProvisionedThroughputExceeded
• IteratorAgeMilliseconds
• ALL

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.
",
+          "smithy.api#documentation": "
List of shard-level metrics to enable.

The following are the valid shard-level metrics. The value \"ALL\" enables every metric.
• IncomingBytes
• IncomingRecords
• OutgoingBytes
• OutgoingRecords
• WriteProvisionedThroughputExceeded
• ReadProvisionedThroughputExceeded
• IteratorAgeMilliseconds
• ALL

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.
",
          "smithy.api#required": {}
      } }

@@ -782,7 +806,7 @@
      "ShardLevelMetrics": {
        "target": "com.amazonaws.kinesis#MetricsNameList",
        "traits": {
-          "smithy.api#documentation": "
List of shard-level metrics.

The following are the valid shard-level metrics. The value \"ALL\" enhances every metric.
• IncomingBytes
• IncomingRecords
• OutgoingBytes
• OutgoingRecords
• WriteProvisionedThroughputExceeded
• ReadProvisionedThroughputExceeded
• IteratorAgeMilliseconds
• ALL

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.
"
+          "smithy.api#documentation": "
List of shard-level metrics.

The following are the valid shard-level metrics. The value \"ALL\" enhances every metric.
• IncomingBytes
• IncomingRecords
• OutgoingBytes
• OutgoingRecords
• WriteProvisionedThroughputExceeded
• ReadProvisionedThroughputExceeded
• IteratorAgeMilliseconds
• ALL

For more information, see Monitoring the Amazon Kinesis Data Streams Service with Amazon CloudWatch in the Amazon Kinesis Data Streams Developer Guide.
"
      } } },
@@ -808,13 +832,13 @@
      "CurrentShardLevelMetrics": {
        "target": "com.amazonaws.kinesis#MetricsNameList",
        "traits": {
-          "smithy.api#documentation": "
Represents the current state of the metrics that are in the enhanced state before the operation.
"
+          "smithy.api#documentation": "
Represents the current state of the metrics that are in the enhanced state before the operation.
"
      } },
      "DesiredShardLevelMetrics": {
        "target": "com.amazonaws.kinesis#MetricsNameList",
        "traits": {
-          "smithy.api#documentation": "
Represents the list of all the metrics that would be in the enhanced state after the operation.
"
+          "smithy.api#documentation": "
Represents the list of all the metrics that would be in the enhanced state after the operation.
"
      } } },
@@ -896,7 +920,7 @@
      } ],
      "traits": {
-        "smithy.api#documentation": "

Gets data records from a Kinesis data stream's shard.

Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. It might take multiple calls to get to a portion of the shard that contains records.

You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit. The maximum number of records that can be returned per call is 10,000.

The size of the data returned by GetRecords varies depending on the utilization of the shard. The maximum size of data that GetRecords can return is 10 MiB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the stream, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. GetRecords doesn't return any data when it throws an exception. For this reason, we recommend that you wait 1 second between calls to GetRecords. However, it's possible that the application will get exceptions for longer than 1 second.

To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Data Streams Developer Guide).

Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side time stamp, whereas a client-side time stamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time stamp accuracy, or that the time stamp is always increasing. For example, records in a shard or across a stream might have time stamps that are out of order.

This operation has a limit of five transactions per second per shard.
"
+        "smithy.api#documentation": "
Gets data records from a Kinesis data stream's shard.

Specify a shard iterator using the ShardIterator parameter. The shard iterator specifies the position in the shard from which you want to start reading data records sequentially. If there are no records available in the portion of the shard that the iterator points to, GetRecords returns an empty list. It might take multiple calls to get to a portion of the shard that contains records.

You can scale by provisioning multiple shards per stream while considering service limits (for more information, see Amazon Kinesis Data Streams Limits in the Amazon Kinesis Data Streams Developer Guide). Your application should have one thread per shard, each reading continuously from its stream. To read from a stream continually, call GetRecords in a loop. Use GetShardIterator to get the shard iterator to specify in the first GetRecords call. GetRecords returns a new shard iterator in NextShardIterator. Specify the shard iterator returned in NextShardIterator in subsequent calls to GetRecords. If the shard has been closed, the shard iterator can't return more data and GetRecords returns null in NextShardIterator. You can terminate the loop when the shard is closed, or when the shard iterator reaches the record with the sequence number or other attribute that marks it as the last record to process.

Each data record can be up to 1 MiB in size, and each shard can read up to 2 MiB per second. You can ensure that your calls don't exceed the maximum supported size or throughput by using the Limit parameter to specify the maximum number of records that GetRecords can return. Consider your average record size when determining this limit. The maximum number of records that can be returned per call is 10,000.

The size of the data returned by GetRecords varies depending on the utilization of the shard. It is recommended that consumer applications retrieve records via the GetRecords command using the 5 TPS limit to remain caught up. Retrieving records less frequently can lead to consumer applications falling behind. The maximum size of data that GetRecords can return is 10 MiB. If a call returns this amount of data, subsequent calls made within the next 5 seconds throw ProvisionedThroughputExceededException. If there is insufficient provisioned throughput on the stream, subsequent calls made within the next 1 second throw ProvisionedThroughputExceededException. GetRecords doesn't return any data when it throws an exception. For this reason, we recommend that you wait 1 second between calls to GetRecords. However, it's possible that the application will get exceptions for longer than 1 second.

To detect whether the application is falling behind in processing, you can use the MillisBehindLatest response attribute. You can also monitor the stream using CloudWatch metrics and other mechanisms (see Monitoring in the Amazon Kinesis Data Streams Developer Guide).

Each Amazon Kinesis record includes a value, ApproximateArrivalTimestamp, that is set when a stream successfully receives and stores a record. This is commonly referred to as a server-side time stamp, whereas a client-side time stamp is set when a data producer creates or sends the record to a stream (a data producer is any data source putting data records into a stream, for example with PutRecords). The time stamp has millisecond precision. There are no guarantees about the time stamp accuracy, or that the time stamp is always increasing. For example, records in a shard or across a stream might have time stamps that are out of order.

This operation has a limit of five transactions per second per shard.
"
      } },
    "com.amazonaws.kinesis#GetRecordsInput": {

@@ -943,17 +967,20 @@
      "NextShardIterator": {
        "target": "com.amazonaws.kinesis#ShardIterator",
        "traits": {
-          "smithy.api#documentation": "
The next position in the shard from which to start sequentially reading data records. If set to null, the shard has been closed and the requested iterator does not return any more data.
"
+          "smithy.api#documentation": "
The next position in the shard from which to start sequentially reading data records. If set to null, the shard has been closed and the requested iterator does not return any more data.
"
      } },
      "MillisBehindLatest": {
        "target": "com.amazonaws.kinesis#MillisBehindLatest",
        "traits": {
-          "smithy.api#documentation": "
The number of milliseconds the GetRecords response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment.
"
+          "smithy.api#documentation": "
The number of milliseconds the GetRecords response is from the tip of the stream, indicating how far behind current time the consumer is. A value of zero indicates that record processing is caught up, and there are no new records to process at this moment.
"
      } },
      "ChildShards": {
-        "target": "com.amazonaws.kinesis#ChildShardList"
+        "target": "com.amazonaws.kinesis#ChildShardList",
+        "traits": {
+          "smithy.api#documentation": "
The list of the current shard's child shards, returned in the GetRecords API's response only when the end of the current shard is reached.
"
+        } } },
      "traits": {
@@ -980,7 +1007,7 @@
      } ],
      "traits": {
-        "smithy.api#documentation": "
Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is returned to the requester.

A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards.

You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type. Alternatively, the parameter can read right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.

When you read repeatedly from a stream, use a GetShardIterator request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator. A new shard iterator is returned by every GetRecords request in NextShardIterator, which you use in the ShardIterator parameter of the next GetRecords request.

If a GetShardIterator request is made too often, you receive a ProvisionedThroughputExceededException. For more information about throughput limits, see GetRecords, and Streams Limits in the Amazon Kinesis Data Streams Developer Guide.

If the shard is closed, GetShardIterator returns a valid iterator for the last sequence number of the shard. A shard can be closed as a result of using SplitShard or MergeShards.

GetShardIterator has a limit of five transactions per second per account per open shard.
"
+        "smithy.api#documentation": "
Gets an Amazon Kinesis shard iterator. A shard iterator expires 5 minutes after it is returned to the requester.

A shard iterator specifies the shard position from which to start reading data records sequentially. The position is specified using the sequence number of a data record in a shard. A sequence number is the identifier associated with every record ingested in the stream, and is assigned when a record is put into the stream. Each stream has one or more shards.

You must specify the shard iterator type. For example, you can set the ShardIteratorType parameter to read exactly from the position denoted by a specific sequence number by using the AT_SEQUENCE_NUMBER shard iterator type. Alternatively, the parameter can read right after the sequence number by using the AFTER_SEQUENCE_NUMBER shard iterator type, using sequence numbers returned by earlier calls to PutRecord, PutRecords, GetRecords, or DescribeStream. In the request, you can specify the shard iterator type AT_TIMESTAMP to read records from an arbitrary point in time, TRIM_HORIZON to cause ShardIterator to point to the last untrimmed record in the shard in the system (the oldest data record in the shard), or LATEST so that you always read the most recent data in the shard.

                                                                      \n

                                                                      When you read repeatedly from a stream, use a GetShardIterator\n request to get the first shard iterator for use in your first GetRecords request and for subsequent reads use the shard iterator returned by the GetRecords request in NextShardIterator. A new shard\n iterator is returned by every GetRecords request in\n NextShardIterator, which you use in the ShardIterator\n parameter of the next GetRecords request.

                                                                      \n

                                                                      If a GetShardIterator request is made too often, you receive a\n ProvisionedThroughputExceededException. For more information about\n throughput limits, see GetRecords, and Streams Limits in the\n Amazon Kinesis Data Streams Developer Guide.

                                                                      \n

                                                                      If the shard is closed, GetShardIterator returns a valid iterator\n for the last sequence number of the shard. A shard can be closed as a result of using\n SplitShard or MergeShards.

                                                                      \n

                                                                      \n GetShardIterator has a limit of five transactions per second per\n account per open shard.
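As an illustration of the iterator types and the limits described above, a sketch of building GetShardIterator requests with the generated TypeScript client; the region, stream name, shard ID, sequence number, and timestamp are placeholder values:

```ts
import { GetShardIteratorCommand, KinesisClient } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// Oldest untrimmed record in the shard (TRIM_HORIZON).
const fromTrimHorizon = new GetShardIteratorCommand({
  StreamName: "example-stream",
  ShardId: "shardId-000000000000",
  ShardIteratorType: "TRIM_HORIZON",
});

// Right after a sequence number returned by an earlier PutRecord/PutRecords/GetRecords call.
const afterSequenceNumber = new GetShardIteratorCommand({
  StreamName: "example-stream",
  ShardId: "shardId-000000000000",
  ShardIteratorType: "AFTER_SEQUENCE_NUMBER",
  StartingSequenceNumber: "49590338271490256608559692538361571095921575989136588898", // placeholder
});

// An arbitrary point in time (AT_TIMESTAMP).
const atTimestamp = new GetShardIteratorCommand({
  StreamName: "example-stream",
  ShardId: "shardId-000000000000",
  ShardIteratorType: "AT_TIMESTAMP",
  Timestamp: new Date("2021-11-30T00:00:00Z"),
});

// The returned ShardIterator feeds the first GetRecords call and expires after 5 minutes.
client.send(fromTrimHorizon).then(({ ShardIterator }) => console.log(ShardIterator));
```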

                                                                      " } }, "com.amazonaws.kinesis#GetShardIteratorInput": { @@ -1003,14 +1030,14 @@ "ShardIteratorType": { "target": "com.amazonaws.kinesis#ShardIteratorType", "traits": { - "smithy.api#documentation": "

                                                                      Determines how the shard iterator is used to start reading data records from the\n shard.

                                                                      \n

                                                                      The following are the valid Amazon Kinesis shard iterator types:

                                                                      \n
                                                                        \n
                                                                      • \n \n

                                                                        AT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific\n sequence number, provided in the value\n StartingSequenceNumber.

                                                                        \n
                                                                      • \n
                                                                      • \n \n

                                                                        AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a\n specific sequence number, provided in the value\n StartingSequenceNumber.

                                                                        \n
                                                                      • \n
                                                                      • \n \n

                                                                        AT_TIMESTAMP - Start reading from the position denoted by a specific time\n stamp, provided in the value Timestamp.

                                                                        \n
                                                                      • \n
                                                                      • \n \n

                                                                        TRIM_HORIZON - Start reading at the last untrimmed record in the shard in\n the system, which is the oldest data record in the shard.

                                                                        \n
                                                                      • \n
                                                                      • \n \n

                                                                        LATEST - Start reading just after the most recent record in the shard, so\n that you always read the most recent data in the shard.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#documentation": "

                                                                      Determines how the shard iterator is used to start reading data records from the\n shard.

                                                                      \n

                                                                      The following are the valid Amazon Kinesis shard iterator types:

                                                                      \n
                                                                        \n
                                                                      • \n \n

                                                                        AT_SEQUENCE_NUMBER - Start reading from the position denoted by a specific\n sequence number, provided in the value\n StartingSequenceNumber.

                                                                        \n
                                                                      • \n
                                                                      • \n \n

                                                                        AFTER_SEQUENCE_NUMBER - Start reading right after the position denoted by a\n specific sequence number, provided in the value\n StartingSequenceNumber.

                                                                        \n
                                                                      • \n
                                                                      • \n \n

                                                                        AT_TIMESTAMP - Start reading from the position denoted by a specific time\n stamp, provided in the value Timestamp.

                                                                        \n
                                                                      • \n
                                                                      • \n \n

                                                                        TRIM_HORIZON - Start reading at the last untrimmed record in the shard in the\n system, which is the oldest data record in the shard.

                                                                        \n
                                                                      • \n
                                                                      • \n \n

                                                                        LATEST - Start reading just after the most recent record in the shard, so that\n you always read the most recent data in the shard.

                                                                        \n
                                                                      • \n
                                                                      ", "smithy.api#required": {} } }, "StartingSequenceNumber": { "target": "com.amazonaws.kinesis#SequenceNumber", "traits": { - "smithy.api#documentation": "

                                                                      The sequence number of the data record in the shard from which to start reading.\n Used with shard iterator type AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.

                                                                      " + "smithy.api#documentation": "

                                                                      The sequence number of the data record in the shard from which to start reading. Used\n with shard iterator type AT_SEQUENCE_NUMBER and AFTER_SEQUENCE_NUMBER.

                                                                      " } }, "Timestamp": { @@ -1041,7 +1068,7 @@ "com.amazonaws.kinesis#HashKey": { "type": "string", "traits": { - "smithy.api#pattern": "0|([1-9]\\d{0,38})" + "smithy.api#pattern": "^0|([1-9]\\d{0,38})$" } }, "com.amazonaws.kinesis#HashKeyRange": { @@ -1086,7 +1113,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Increases the Kinesis data stream's retention period, which is the length of time\n data records are accessible after they are added to the stream. The maximum value of a\n stream's retention period is 168 hours (7 days).

                                                                      \n

                                                                      If you choose a longer stream retention period, this operation increases the time\n period during which records that have not yet expired are accessible. However, it does\n not make previous, expired data (older than the stream's previous retention period)\n accessible after the operation has been called. For example, if a stream's retention\n period is set to 24 hours and is increased to 168 hours, any data that is older than 24\n hours remains inaccessible to consumer applications.

                                                                      " + "smithy.api#documentation": "

                                                                      Increases the Kinesis data stream's retention period, which is the length of time data\n records are accessible after they are added to the stream. The maximum value of a\n stream's retention period is 8760 hours (365 days).

                                                                      \n

                                                                      If you choose a longer stream retention period, this operation increases the time\n period during which records that have not yet expired are accessible. However, it does\n not make previous, expired data (older than the stream's previous retention period)\n accessible after the operation has been called. For example, if a stream's retention\n period is set to 24 hours and is increased to 168 hours, any data that is older than 24\n hours remains inaccessible to consumer applications.
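A small usage sketch of this operation with the generated client (the stream name and region are placeholders); raising retention to 168 hours matches the example in the paragraph above:

```ts
import { IncreaseStreamRetentionPeriodCommand, KinesisClient } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

// Raise retention from the 24-hour default to 168 hours (7 days); the model caps this at 8760 hours.
client
  .send(new IncreaseStreamRetentionPeriodCommand({ StreamName: "example-stream", RetentionPeriodHours: 168 }))
  .then(() => console.log("Retention period increased"));
```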

                                                                      " } }, "com.amazonaws.kinesis#IncreaseStreamRetentionPeriodInput": { @@ -1179,7 +1206,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The request was rejected because the state of the specified resource isn't valid\n for this request. For more information, see How Key State Affects Use of a\n Customer Master Key in the AWS Key Management Service Developer\n Guide.

                                                                      ", + "smithy.api#documentation": "

                                                                      The request was rejected because the state of the specified resource isn't valid for\n this request. For more information, see How Key State Affects Use of a\n Customer Master Key in the Amazon Web Services Key Management\n Service Developer Guide.

                                                                      ", "smithy.api#error": "client" } }, @@ -1209,7 +1236,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The AWS access key ID needs a subscription for the service.

                                                                      ", + "smithy.api#documentation": "

                                                                      The Amazon Web Services access key ID needs a subscription for the service.

                                                                      ", "smithy.api#error": "client" } }, @@ -1224,7 +1251,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The request was denied due to request throttling. For more information about\n throttling, see Limits in\n the AWS Key Management Service Developer Guide.

                                                                      ", + "smithy.api#documentation": "

                                                                      The request was denied due to request throttling. For more information about\n throttling, see Limits in\n the Amazon Web Services Key Management Service Developer\n Guide.

                                                                      ", "smithy.api#error": "client" } }, @@ -1239,6 +1266,24 @@ }, "com.amazonaws.kinesis#Kinesis_20131202": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Kinesis", + "arnNamespace": "kinesis", + "cloudFormationName": "Kinesis", + "cloudTrailEventSource": "kinesis.amazonaws.com", + "endpointPrefix": "kinesis" + }, + "aws.auth#sigv4": { + "name": "kinesis" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "Amazon Kinesis Data Streams Service API Reference\n

                                                                      Amazon Kinesis Data Streams is a managed service that scales elastically for real-time\n processing of streaming big data.

                                                                      ", + "smithy.api#title": "Amazon Kinesis", + "smithy.api#xmlNamespace": { + "uri": "http://kinesis.amazonaws.com/doc/2013-12-02" + } + }, "version": "2013-12-02", "operations": [ { @@ -1324,26 +1369,11 @@ }, { "target": "com.amazonaws.kinesis#UpdateShardCount" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "Kinesis", - "arnNamespace": "kinesis", - "cloudFormationName": "Kinesis", - "cloudTrailEventSource": "kinesis.amazonaws.com", - "endpointPrefix": "kinesis" - }, - "aws.auth#sigv4": { - "name": "kinesis" }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "Amazon Kinesis Data Streams Service API Reference\n

                                                                      Amazon Kinesis Data Streams is a managed service that scales elastically for\n real-time processing of streaming big data.

                                                                      ", - "smithy.api#title": "Amazon Kinesis", - "smithy.api#xmlNamespace": { - "uri": "http://kinesis.amazonaws.com/doc/2013-12-02" + { + "target": "com.amazonaws.kinesis#UpdateStreamMode" } - } + ] }, "com.amazonaws.kinesis#LimitExceededException": { "type": "structure", @@ -1356,7 +1386,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The requested resource exceeds the maximum number allowed, or the number of\n concurrent stream requests exceeds the maximum number allowed.

                                                                      ", + "smithy.api#documentation": "

                                                                      The requested resource exceeds the maximum number allowed, or the number of concurrent\n stream requests exceeds the maximum number allowed.

                                                                      ", "smithy.api#error": "client" } }, @@ -1386,7 +1416,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Lists the shards in a stream and provides information about each shard. This\n operation has a limit of 100 transactions per second per data stream.

                                                                      \n \n

                                                                      This API is a new operation that is used by the Amazon Kinesis Client Library\n (KCL). If you have a fine-grained IAM policy that only allows specific operations,\n you must update your policy to allow calls to this API. For more information, see\n Controlling Access to Amazon Kinesis Data Streams Resources Using\n IAM.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      Lists the shards in a stream and provides information about each shard. This operation\n has a limit of 1000 transactions per second per data stream.

                                                                      \n

                                                                      This action does not list expired shards. For information about expired shards, see\n Data Routing, Data Persistence, and Shard State after a Reshard.

                                                                      \n \n

                                                                      This API is a new operation that is used by the Amazon Kinesis Client Library\n (KCL). If you have a fine-grained IAM policy that only allows specific operations,\n you must update your policy to allow calls to this API. For more information, see\n Controlling Access to Amazon Kinesis Data Streams Resources Using\n IAM.

                                                                      \n
                                                                      " } }, "com.amazonaws.kinesis#ListShardsInput": { @@ -1401,29 +1431,32 @@ "NextToken": { "target": "com.amazonaws.kinesis#NextToken", "traits": { - "smithy.api#documentation": "

                                                                      When the number of shards in the data stream is greater than the default value for\n the MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of shards in the data stream,\n the response includes a pagination token named NextToken. You can specify\n this NextToken value in a subsequent call to ListShards to\n list the next set of shards.

                                                                      \n

                                                                      Don't specify StreamName or StreamCreationTimestamp if\n you specify NextToken because the latter unambiguously identifies the\n stream.

                                                                      \n

                                                                      You can optionally specify a value for the MaxResults parameter when\n you specify NextToken. If you specify a MaxResults value that\n is less than the number of shards that the operation returns if you don't specify\n MaxResults, the response will contain a new NextToken\n value. You can use the new NextToken value in a subsequent call to the\n ListShards operation.

                                                                      \n \n

                                                                      Tokens expire after 300 seconds. When you obtain a value for\n NextToken in the response to a call to ListShards, you\n have 300 seconds to use that value. If you specify an expired token in a call to\n ListShards, you get\n ExpiredNextTokenException.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      When the number of shards in the data stream is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of shards in the data stream,\n the response includes a pagination token named NextToken. You can specify\n this NextToken value in a subsequent call to ListShards to\n list the next set of shards.

                                                                      \n

                                                                      Don't specify StreamName or StreamCreationTimestamp if you\n specify NextToken because the latter unambiguously identifies the\n stream.

                                                                      \n

                                                                      You can optionally specify a value for the MaxResults parameter when you\n specify NextToken. If you specify a MaxResults value that is\n less than the number of shards that the operation returns if you don't specify\n MaxResults, the response will contain a new NextToken\n value. You can use the new NextToken value in a subsequent call to the\n ListShards operation.

                                                                      \n \n

                                                                      Tokens expire after 300 seconds. When you obtain a value for\n NextToken in the response to a call to ListShards, you\n have 300 seconds to use that value. If you specify an expired token in a call to\n ListShards, you get ExpiredNextTokenException.
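A pagination sketch that follows the NextToken contract described above, using the generated client; the stream name is a placeholder and tokens are consumed immediately because of the 300-second expiry:

```ts
import { KinesisClient, ListShardsCommand, Shard } from "@aws-sdk/client-kinesis";

const listAllShards = async (client: KinesisClient, streamName: string): Promise<Shard[]> => {
  const shards: Shard[] = [];
  // The first page identifies the stream by name; later pages must carry only NextToken.
  let output = await client.send(new ListShardsCommand({ StreamName: streamName, MaxResults: 1000 }));
  shards.push(...(output.Shards ?? []));
  while (output.NextToken) {
    // Tokens expire after 300 seconds, so each one is consumed immediately.
    output = await client.send(new ListShardsCommand({ NextToken: output.NextToken, MaxResults: 1000 }));
    shards.push(...(output.Shards ?? []));
  }
  return shards;
};
```

The package's generated paginator helpers, where available, wrap this same NextToken loop.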

                                                                      \n
                                                                      " } }, "ExclusiveStartShardId": { "target": "com.amazonaws.kinesis#ShardId", "traits": { - "smithy.api#documentation": "

                                                                      Specify this parameter to indicate that you want to list the shards starting with\n the shard whose ID immediately follows ExclusiveStartShardId.

                                                                      \n

                                                                      If you don't specify this parameter, the default behavior is for\n ListShards to list the shards starting with the first one in the\n stream.

                                                                      \n

                                                                      You cannot specify this parameter if you specify NextToken.

                                                                      " + "smithy.api#documentation": "

                                                                      Specify this parameter to indicate that you want to list the shards starting with the\n shard whose ID immediately follows ExclusiveStartShardId.

                                                                      \n

                                                                      If you don't specify this parameter, the default behavior is for\n ListShards to list the shards starting with the first one in the\n stream.

                                                                      \n

                                                                      You cannot specify this parameter if you specify NextToken.

                                                                      " } }, "MaxResults": { "target": "com.amazonaws.kinesis#ListShardsInputLimit", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of shards to return in a single call to ListShards.\n The minimum value you can specify for this parameter is 1, and the maximum is 10,000,\n which is also the default.

                                                                      \n

                                                                      When the number of shards to be listed is greater than the value of\n MaxResults, the response contains a NextToken value that\n you can use in a subsequent call to ListShards to list the next set of\n shards.

                                                                      " + "smithy.api#documentation": "

                                                                      The maximum number of shards to return in a single call to ListShards.\n The default value is 1000. If you specify a value greater than 1000, at most 1000\n results are returned.

                                                                      \n

                                                                      When the number of shards to be listed is greater than the value of\n MaxResults, the response contains a NextToken value that\n you can use in a subsequent call to ListShards to list the next set of\n shards.

                                                                      " } }, "StreamCreationTimestamp": { "target": "com.amazonaws.kinesis#Timestamp", "traits": { - "smithy.api#documentation": "

                                                                      Specify this input parameter to distinguish data streams that have the same name.\n For example, if you create a data stream and then delete it, and you later create\n another data stream with the same name, you can use this input parameter to specify\n which of the two streams you want to list the shards for.

                                                                      \n

                                                                      You cannot specify this parameter if you specify the NextToken\n parameter.

                                                                      " + "smithy.api#documentation": "

                                                                      Specify this input parameter to distinguish data streams that have the same name. For\n example, if you create a data stream and then delete it, and you later create another\n data stream with the same name, you can use this input parameter to specify which of the\n two streams you want to list the shards for.

                                                                      \n

                                                                      You cannot specify this parameter if you specify the NextToken\n parameter.

                                                                      " } }, "ShardFilter": { - "target": "com.amazonaws.kinesis#ShardFilter" + "target": "com.amazonaws.kinesis#ShardFilter", + "traits": { + "smithy.api#documentation": "

                                                                      Enables you to filter out the response of the ListShards API. You can\n only specify one filter at a time.

                                                                      \n

                                                                      If you use the ShardFilter parameter when invoking the ListShards API,\n the Type is the required property and must be specified. If you specify the\n AT_TRIM_HORIZON, FROM_TRIM_HORIZON, or\n AT_LATEST types, you do not need to specify either the\n ShardId or the Timestamp optional properties.

                                                                      \n

                                                                      If you specify the AFTER_SHARD_ID type, you must also provide the value\n for the optional ShardId property. The ShardId property is\n identical in functionality to the ExclusiveStartShardId parameter of the\n ListShards API. When the ShardId property is specified, the\n response includes the shards starting with the shard whose ID immediately follows the\n ShardId that you provided.

                                                                      \n

                                                                      If you specify the AT_TIMESTAMP or FROM_TIMESTAMP type,\n you must also provide the value for the optional Timestamp property. If you\n specify the AT_TIMESTAMP type, then all shards that were open at the provided timestamp\n are returned. If you specify the FROM_TIMESTAMP type, then all shards starting from the\n provided timestamp to TIP are returned.
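For instance (region, stream name, and timestamp are placeholders), an AT_TIMESTAMP filter would be passed to the generated client like this:

```ts
import { KinesisClient, ListShardsCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

client
  .send(
    new ListShardsCommand({
      StreamName: "example-stream", // placeholder
      // Type is required; AT_TIMESTAMP additionally requires the Timestamp property.
      ShardFilter: { Type: "AT_TIMESTAMP", Timestamp: new Date("2021-11-30T00:00:00Z") },
    })
  )
  .then((output) => console.log(output.Shards?.map((shard) => shard.ShardId)));
```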

                                                                      " + } } } }, @@ -1449,7 +1482,7 @@ "NextToken": { "target": "com.amazonaws.kinesis#NextToken", "traits": { - "smithy.api#documentation": "

                                                                      When the number of shards in the data stream is greater than the default value for\n the MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of shards in the data stream,\n the response includes a pagination token named NextToken. You can specify\n this NextToken value in a subsequent call to ListShards to\n list the next set of shards. For more information about the use of this pagination token\n when calling the ListShards operation, see ListShardsInput$NextToken.

                                                                      \n \n

                                                                      Tokens expire after 300 seconds. When you obtain a value for\n NextToken in the response to a call to ListShards, you\n have 300 seconds to use that value. If you specify an expired token in a call to\n ListShards, you get\n ExpiredNextTokenException.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      When the number of shards in the data stream is greater than the default value for the\n MaxResults parameter, or if you explicitly specify a value for\n MaxResults that is less than the number of shards in the data stream,\n the response includes a pagination token named NextToken. You can specify\n this NextToken value in a subsequent call to ListShards to\n list the next set of shards. For more information about the use of this pagination token\n when calling the ListShards operation, see ListShardsInput$NextToken.

                                                                      \n \n

                                                                      Tokens expire after 300 seconds. When you obtain a value for\n NextToken in the response to a call to ListShards, you\n have 300 seconds to use that value. If you specify an expired token in a call to\n ListShards, you get ExpiredNextTokenException.

                                                                      \n
                                                                      " } } } @@ -1494,7 +1527,7 @@ "StreamARN": { "target": "com.amazonaws.kinesis#StreamARN", "traits": { - "smithy.api#documentation": "

                                                                      The ARN of the Kinesis data stream for which you want to list the registered\n consumers. For more information, see Amazon Resource Names (ARNs) and AWS Service Namespaces.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ARN of the Kinesis data stream for which you want to list the registered\n consumers. For more information, see Amazon Resource Names (ARNs) and Amazon Web Services Service\n Namespaces.

                                                                      ", "smithy.api#required": {} } }, @@ -1507,7 +1540,7 @@ "MaxResults": { "target": "com.amazonaws.kinesis#ListStreamConsumersInputLimit", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of consumers that you want a single call of\n ListStreamConsumers to return.

                                                                      " + "smithy.api#documentation": "

                                                                      The maximum number of consumers that you want a single call of\n ListStreamConsumers to return. The default value is 100. If you specify\n a value greater than 100, at most 100 results are returned.

                                                                      " } }, "StreamCreationTimestamp": { @@ -1559,7 +1592,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Lists your Kinesis data streams.

                                                                      \n

                                                                      The number of streams may be too large to return from a single call to\n ListStreams. You can limit the number of returned streams using the\n Limit parameter. If you do not specify a value for the\n Limit parameter, Kinesis Data Streams uses the default limit, which is\n currently 10.

                                                                      \n

                                                                      You can detect if there are more streams available to list by using the\n HasMoreStreams flag from the returned output. If there are more streams\n available, you can request more streams by using the name of the last stream returned by\n the ListStreams request in the ExclusiveStartStreamName\n parameter in a subsequent request to ListStreams. The group of stream names\n returned by the subsequent request is then added to the list. You can continue this\n process until all the stream names have been collected in the list.

                                                                      \n

                                                                      \n ListStreams has a limit of five transactions per second per\n account.

                                                                      " + "smithy.api#documentation": "

                                                                      Lists your Kinesis data streams.

                                                                      \n

                                                                      The number of streams may be too large to return from a single call to\n ListStreams. You can limit the number of returned streams using the\n Limit parameter. If you do not specify a value for the\n Limit parameter, Kinesis Data Streams uses the default limit, which is\n currently 100.

                                                                      \n

                                                                      You can detect if there are more streams available to list by using the\n HasMoreStreams flag from the returned output. If there are more streams\n available, you can request more streams by using the name of the last stream returned by\n the ListStreams request in the ExclusiveStartStreamName\n parameter in a subsequent request to ListStreams. The group of stream names\n returned by the subsequent request is then added to the list. You can continue this\n process until all the stream names have been collected in the list.

                                                                      \n

                                                                      \n ListStreams has a limit of five transactions per second per\n account.
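A sketch of the HasMoreStreams / ExclusiveStartStreamName loop described above, using the generated client; it does not handle the five-TPS limit or retries:

```ts
import { KinesisClient, ListStreamsCommand } from "@aws-sdk/client-kinesis";

const listAllStreamNames = async (client: KinesisClient): Promise<string[]> => {
  const names: string[] = [];
  let exclusiveStartStreamName: string | undefined;
  let hasMoreStreams = true;
  while (hasMoreStreams) {
    const output = await client.send(
      new ListStreamsCommand({ Limit: 100, ExclusiveStartStreamName: exclusiveStartStreamName })
    );
    names.push(...(output.StreamNames ?? []));
    hasMoreStreams = output.HasMoreStreams ?? false;
    // Continue from the name of the last stream returned on this page.
    exclusiveStartStreamName = names[names.length - 1];
  }
  return names;
};
```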

                                                                      " } }, "com.amazonaws.kinesis#ListStreamsInput": { @@ -1568,7 +1601,7 @@ "Limit": { "target": "com.amazonaws.kinesis#ListStreamsInputLimit", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of streams to list.

                                                                      " + "smithy.api#documentation": "

                                                                      The maximum number of streams to list. The default value is 100. If you specify a\n value greater than 100, at most 100 results are returned.

                                                                      " } }, "ExclusiveStartStreamName": { @@ -1598,7 +1631,7 @@ "StreamNames": { "target": "com.amazonaws.kinesis#StreamNameList", "traits": { - "smithy.api#documentation": "

                                                                      The names of the streams that are associated with the AWS account making the\n ListStreams request.

                                                                      ", + "smithy.api#documentation": "

                                                                      The names of the streams that are associated with the Amazon Web Services account\n making the ListStreams request.

                                                                      ", "smithy.api#required": {} } }, @@ -1650,7 +1683,7 @@ "ExclusiveStartTagKey": { "target": "com.amazonaws.kinesis#TagKey", "traits": { - "smithy.api#documentation": "

                                                                      The key to use as the starting point for the list of tags. If this parameter is\n set, ListTagsForStream gets all tags that occur after\n ExclusiveStartTagKey.

                                                                      " + "smithy.api#documentation": "

                                                                      The key to use as the starting point for the list of tags. If this parameter is set,\n ListTagsForStream gets all tags that occur after\n ExclusiveStartTagKey.

                                                                      " } }, "Limit": { @@ -1687,7 +1720,7 @@ "HasMoreTags": { "target": "com.amazonaws.kinesis#BooleanObject", "traits": { - "smithy.api#documentation": "

                                                                      If set to true, more tags are available. To request additional tags,\n set ExclusiveStartTagKey to the key of the last tag returned.

                                                                      ", + "smithy.api#documentation": "

                                                                      If set to true, more tags are available. To request additional tags, set\n ExclusiveStartTagKey to the key of the last tag returned.

                                                                      ", "smithy.api#required": {} } } @@ -1713,10 +1746,13 @@ }, { "target": "com.amazonaws.kinesis#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.kinesis#ValidationException" } ], "traits": { - "smithy.api#documentation": "

                                                                      Merges two adjacent shards in a Kinesis data stream and combines them into a single\n shard to reduce the stream's capacity to ingest and transport data. Two shards are\n considered adjacent if the union of the hash key ranges for the two shards form a\n contiguous set with no gaps. For example, if you have two shards, one with a hash key\n range of 276...381 and the other with a hash key range of 382...454, then you could\n merge these two shards into a single shard that would have a hash key range of\n 276...454. After the merge, the single child shard receives data for all hash key values\n covered by the two parent shards.

                                                                      \n

                                                                      \n MergeShards is called when there is a need to reduce the overall capacity\n of a stream because of excess capacity that is not being used. You must specify the\n shard to be merged and the adjacent shard for a stream. For more information about\n merging shards, see Merge Two\n Shards in the Amazon Kinesis Data Streams Developer\n Guide.

                                                                      \n

                                                                      If the stream is in the ACTIVE state, you can call\n MergeShards. If a stream is in the CREATING,\n UPDATING, or DELETING state, MergeShards\n returns a ResourceInUseException. If the specified stream does not exist,\n MergeShards returns a ResourceNotFoundException.

                                                                      \n

                                                                      You can use DescribeStream to check the state of the stream,\n which is returned in StreamStatus.

                                                                      \n

                                                                      \n MergeShards is an asynchronous operation. Upon receiving a\n MergeShards request, Amazon Kinesis Data Streams immediately returns a\n response and sets the StreamStatus to UPDATING. After the\n operation is completed, Kinesis Data Streams sets the StreamStatus to\n ACTIVE. Read and write operations continue to work while the stream is\n in the UPDATING state.

                                                                      \n

                                                                      You use DescribeStream to determine the shard IDs that are\n specified in the MergeShards request.

                                                                      \n

                                                                      If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards,\n or SplitShard, you receive a LimitExceededException.

                                                                      \n

                                                                      \n MergeShards has a limit of five transactions per second per\n account.

                                                                      " + "smithy.api#documentation": "

                                                                      Merges two adjacent shards in a Kinesis data stream and combines them into a single\n shard to reduce the stream's capacity to ingest and transport data. Two shards are\n considered adjacent if the union of the hash key ranges for the two shards form a\n contiguous set with no gaps. For example, if you have two shards, one with a hash key\n range of 276...381 and the other with a hash key range of 382...454, then you could\n merge these two shards into a single shard that would have a hash key range of\n 276...454. After the merge, the single child shard receives data for all hash key values\n covered by the two parent shards.

                                                                      \n

                                                                      \n MergeShards is called when there is a need to reduce the overall capacity\n of a stream because of excess capacity that is not being used. You must specify the\n shard to be merged and the adjacent shard for a stream. For more information about\n merging shards, see Merge Two\n Shards in the Amazon Kinesis Data Streams Developer\n Guide.

                                                                      \n

                                                                      If the stream is in the ACTIVE state, you can call\n MergeShards. If a stream is in the CREATING,\n UPDATING, or DELETING state, MergeShards\n returns a ResourceInUseException. If the specified stream does not exist,\n MergeShards returns a ResourceNotFoundException.

                                                                      \n

                                                                      You can use DescribeStreamSummary to check the state of the stream,\n which is returned in StreamStatus.

                                                                      \n

                                                                      \n MergeShards is an asynchronous operation. Upon receiving a\n MergeShards request, Amazon Kinesis Data Streams immediately returns a\n response and sets the StreamStatus to UPDATING. After the\n operation is completed, Kinesis Data Streams sets the StreamStatus to\n ACTIVE. Read and write operations continue to work while the stream is\n in the UPDATING state.

                                                                      \n

                                                                      You use DescribeStreamSummary and the ListShards\n APIs to determine the shard IDs that are specified in the MergeShards\n request.

                                                                      \n

                                                                      If you try to operate on too many streams in parallel using CreateStream, DeleteStream, MergeShards,\n or SplitShard, you receive a LimitExceededException.

                                                                      \n

                                                                      \n MergeShards has a limit of five transactions per second per account.
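A minimal sketch of invoking this operation with the generated client; the region, stream name, and shard IDs are placeholders and must refer to adjacent shards of an ACTIVE stream:

```ts
import { KinesisClient, MergeShardsCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

client
  .send(
    new MergeShardsCommand({
      StreamName: "example-stream", // placeholder
      ShardToMerge: "shardId-000000000000", // placeholder; must be adjacent to the shard below
      AdjacentShardToMerge: "shardId-000000000001", // placeholder
    })
  )
  // MergeShards is asynchronous: the stream moves to UPDATING and back to ACTIVE when done.
  .then(() => console.log("Merge requested"));
```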

                                                                      " } }, "com.amazonaws.kinesis#MergeShardsInput": { @@ -1732,7 +1768,7 @@ "ShardToMerge": { "target": "com.amazonaws.kinesis#ShardId", "traits": { - "smithy.api#documentation": "

                                                                      The shard ID of the shard to combine with the adjacent shard for the\n merge.

                                                                      ", + "smithy.api#documentation": "

                                                                      The shard ID of the shard to combine with the adjacent shard for the merge.

                                                                      ", "smithy.api#required": {} } }, @@ -1817,6 +1853,26 @@ } } }, + "com.amazonaws.kinesis#OnDemandStreamCountLimitObject": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 1000000 + } + } + }, + "com.amazonaws.kinesis#OnDemandStreamCountObject": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 0, + "max": 1000000 + } + } + }, "com.amazonaws.kinesis#PartitionKey": { "type": "string", "traits": { @@ -1846,7 +1902,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The request rate for the stream is too high, or the requested data is too large for\n the available throughput. Reduce the frequency or size of your requests. For more\n information, see Streams Limits in the\n Amazon Kinesis Data Streams Developer Guide, and Error Retries and\n Exponential Backoff in AWS in the AWS General\n Reference.

                                                                      ", + "smithy.api#documentation": "

                                                                      The request rate for the stream is too high, or the requested data is too large for\n the available throughput. Reduce the frequency or size of your requests. For more\n information, see Streams Limits in the\n Amazon Kinesis Data Streams Developer Guide, and Error Retries and\n Exponential Backoff in Amazon Web Services in the Amazon Web Services General Reference.

                                                                      ", "smithy.api#error": "client" } }, @@ -1888,7 +1944,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Writes a single data record into an Amazon Kinesis data stream. Call\n PutRecord to send data into the stream for real-time ingestion and\n subsequent processing, one record at a time. Each shard can support writes up to 1,000\n records per second, up to a maximum data write total of 1 MiB per second.

                                                                      \n

                                                                      You must specify the name of the stream that captures, stores, and transports the\n data; a partition key; and the data blob itself.

                                                                      \n

                                                                      The data blob can be any type of data; for example, a segment from a log file,\n geographic/location data, website clickstream data, and so on.

                                                                      \n

                                                                      The partition key is used by Kinesis Data Streams to distribute data across shards.\n Kinesis Data Streams segregates the data records that belong to a stream into multiple\n shards, using the partition key associated with each data record to determine the shard\n to which a given data record belongs.

                                                                      \n

                                                                      Partition keys are Unicode strings, with a maximum length limit of 256 characters\n for each key. An MD5 hash function is used to map partition keys to 128-bit integer\n values and to map associated data records to shards using the hash key ranges of the\n shards. You can override hashing the partition key to determine the shard by explicitly\n specifying a hash value using the ExplicitHashKey parameter. For more\n information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.

                                                                      \n

                                                                      \n PutRecord returns the shard ID of where the data record was placed and the\n sequence number that was assigned to the data record.

                                                                      \n

Sequence numbers increase over time and are specific to a shard within a stream,\n not across all shards within a stream. To guarantee strictly increasing ordering, write\n serially to a shard and use the SequenceNumberForOrdering parameter. For\n more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.\n After you write a record to a stream, you cannot modify that record or its order\n within the stream.\n If a PutRecord request cannot be processed because of insufficient\n provisioned throughput on the shard involved in the request, PutRecord\n throws ProvisionedThroughputExceededException.\n By default, data records are accessible for 24 hours from the time that they are\n added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period."
+        "smithy.api#documentation": "Writes a single data record into an Amazon Kinesis data stream. Call\n PutRecord to send data into the stream for real-time ingestion and\n subsequent processing, one record at a time. Each shard can support writes up to 1,000\n records per second, up to a maximum data write total of 1 MiB per second.\n You must specify the name of the stream that captures, stores, and transports the\n data; a partition key; and the data blob itself.\n The data blob can be any type of data; for example, a segment from a log file,\n geographic/location data, website clickstream data, and so on.\n The partition key is used by Kinesis Data Streams to distribute data across shards.\n Kinesis Data Streams segregates the data records that belong to a stream into multiple\n shards, using the partition key associated with each data record to determine the shard\n to which a given data record belongs.\n Partition keys are Unicode strings, with a maximum length limit of 256 characters for\n each key. An MD5 hash function is used to map partition keys to 128-bit integer values\n and to map associated data records to shards using the hash key ranges of the shards.\n You can override hashing the partition key to determine the shard by explicitly\n specifying a hash value using the ExplicitHashKey parameter. For more\n information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.\n PutRecord returns the shard ID of where the data record was placed and the\n sequence number that was assigned to the data record.\n Sequence numbers increase over time and are specific to a shard within a stream, not\n across all shards within a stream. To guarantee strictly increasing ordering, write\n serially to a shard and use the SequenceNumberForOrdering parameter. For\n more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.\n After you write a record to a stream, you cannot modify that record or its order\n within the stream.\n If a PutRecord request cannot be processed because of insufficient\n provisioned throughput on the shard involved in the request, PutRecord\n throws ProvisionedThroughputExceededException.\n By default, data records are accessible for 24 hours from the time that they are added\n to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period."
      }
    },

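For orientation alongside the model change above, a minimal sketch of calling this operation through the generated TypeScript client might look as follows; the region, stream name, partition key, and payload are placeholder values, and SequenceNumberForOrdering is shown commented out because it is only needed for strict per-shard ordering across successive puts.

import { KinesisClient, PutRecordCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

async function putOneRecord(): Promise<void> {
  const result = await client.send(
    new PutRecordCommand({
      StreamName: "example-stream",                        // placeholder stream
      PartitionKey: "user-1234",                           // determines the target shard
      Data: new TextEncoder().encode('{"event":"click"}'), // data blob as bytes
      // SequenceNumberForOrdering: previousSequenceNumber, // sequence number of record n-1
    })
  );
  // The service returns the shard the record landed on and its assigned sequence number.
  console.log(result.ShardId, result.SequenceNumber);
}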
    "com.amazonaws.kinesis#PutRecordInput": {
@@ -1918,13 +1974,13 @@
        "ExplicitHashKey": {
          "target": "com.amazonaws.kinesis#HashKey",
          "traits": {
-            "smithy.api#documentation": "The hash value used to explicitly determine the shard the data record is assigned\n to by overriding the partition key hash."
+            "smithy.api#documentation": "The hash value used to explicitly determine the shard the data record is assigned to\n by overriding the partition key hash."
          }
        },
        "SequenceNumberForOrdering": {
          "target": "com.amazonaws.kinesis#SequenceNumber",
          "traits": {
-            "smithy.api#documentation": "Guarantees strictly increasing sequence numbers, for puts from the same client and\n to the same partition key. Usage: set the SequenceNumberForOrdering of\n record n to the sequence number of record n-1\n (as returned in the result when putting record n-1). If this\n parameter is not set, records are coarsely ordered based on arrival time."
+            "smithy.api#documentation": "Guarantees strictly increasing sequence numbers, for puts from the same client and to\n the same partition key. Usage: set the SequenceNumberForOrdering of record\n n to the sequence number of record n-1 (as\n returned in the result when putting record n-1). If this parameter\n is not set, records are coarsely ordered based on arrival time."
          }
        }
      },
@@ -1945,14 +2001,14 @@
        "SequenceNumber": {
          "target": "com.amazonaws.kinesis#SequenceNumber",
          "traits": {
-            "smithy.api#documentation": "The sequence number identifier that was assigned to the put data record. The\n sequence number for the record is unique across all records in the stream. A sequence\n number is the identifier associated with every record put into the stream.",
+            "smithy.api#documentation": "The sequence number identifier that was assigned to the put data record. The sequence\n number for the record is unique across all records in the stream. A sequence number is\n the identifier associated with every record put into the stream.",
            "smithy.api#required": {}
          }
        },
        "EncryptionType": {
          "target": "com.amazonaws.kinesis#EncryptionType",
          "traits": {
-            "smithy.api#documentation": "The encryption type to use on the record. This parameter can be one of the\n following values:\n • NONE: Do not encrypt the records in the stream.\n • KMS: Use server-side encryption on the records in the stream\n using a customer-managed AWS KMS key."
+            "smithy.api#documentation": "The encryption type to use on the record. This parameter can be one of the following\n values:\n • NONE: Do not encrypt the records in the stream.\n • KMS: Use server-side encryption on the records in the stream\n using a customer-managed Amazon Web Services KMS key."
          }
        }
      },
@@ -1998,7 +2054,7 @@
        }
      ],
      "traits": {

-        "smithy.api#documentation": "Writes multiple data records into a Kinesis data stream in a single call (also\n referred to as a PutRecords request). Use this operation to send data into\n the stream for data ingestion and processing.\n Each PutRecords request can support up to 500 records. Each record in\n the request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,\n including partition keys. Each shard can support writes up to 1,000 records per second,\n up to a maximum data write total of 1 MiB per second.\n You must specify the name of the stream that captures, stores, and transports the\n data; and an array of request Records, with each record in the array\n requiring a partition key and data blob. The record size limit applies to the total size\n of the partition key and data blob.\n The data blob can be any type of data; for example, a segment from a log file,\n geographic/location data, website clickstream data, and so on.\n The partition key is used by Kinesis Data Streams as input to a hash function that\n maps the partition key and associated data to a specific shard. An MD5 hash function is\n used to map partition keys to 128-bit integer values and to map associated data records\n to shards. As a result of this hashing mechanism, all data records with the same\n partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.\n Each record in the Records array may include an optional parameter,\n ExplicitHashKey, which overrides the partition key to shard mapping.\n This parameter allows a data producer to determine explicitly the shard where the record\n is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis\n Data Streams Developer Guide.\n The PutRecords response includes an array of response\n Records. Each record in the response array directly correlates with a\n record in the request array using natural ordering, from the top to the bottom of the\n request and response. The response Records array always includes the same\n number of records as the request array.\n The response Records array includes both successfully and\n unsuccessfully processed records. Kinesis Data Streams attempts to process all records\n in each PutRecords request. A single record failure does not stop the\n processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering\n of records. If you need to read records in the same order they are written to the\n stream, use PutRecord instead of PutRecords, and write to\n the same shard.\n A successfully processed record includes ShardId and\n SequenceNumber values. The ShardId parameter identifies\n the shard in the stream where the record is stored. The SequenceNumber\n parameter is an identifier assigned to the put record, unique to all records in the\n stream.\n An unsuccessfully processed record includes ErrorCode and\n ErrorMessage values. ErrorCode reflects the type of error\n and can be one of the following values:\n ProvisionedThroughputExceededException or InternalFailure.\n ErrorMessage provides more detailed information about the\n ProvisionedThroughputExceededException exception including the account\n ID, stream name, and shard ID of the record that was throttled. For more information\n about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis\n Data Streams Developer Guide.\n After you write a record to a stream, you cannot modify that record or its order\n within the stream.\n By default, data records are accessible for 24 hours from the time that they are\n added to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period."
+        "smithy.api#documentation": "Writes multiple data records into a Kinesis data stream in a single call (also\n referred to as a PutRecords request). Use this operation to send data into\n the stream for data ingestion and processing.\n Each PutRecords request can support up to 500 records. Each record in the\n request can be as large as 1 MiB, up to a limit of 5 MiB for the entire request,\n including partition keys. Each shard can support writes up to 1,000 records per second,\n up to a maximum data write total of 1 MiB per second.\n You must specify the name of the stream that captures, stores, and transports the\n data; and an array of request Records, with each record in the array\n requiring a partition key and data blob. The record size limit applies to the total size\n of the partition key and data blob.\n The data blob can be any type of data; for example, a segment from a log file,\n geographic/location data, website clickstream data, and so on.\n The partition key is used by Kinesis Data Streams as input to a hash function that\n maps the partition key and associated data to a specific shard. An MD5 hash function is\n used to map partition keys to 128-bit integer values and to map associated data records\n to shards. As a result of this hashing mechanism, all data records with the same\n partition key map to the same shard within the stream. For more information, see Adding Data to a Stream in the Amazon Kinesis Data Streams\n Developer Guide.\n Each record in the Records array may include an optional parameter,\n ExplicitHashKey, which overrides the partition key to shard mapping.\n This parameter allows a data producer to determine explicitly the shard where the record\n is stored. For more information, see Adding Multiple Records with PutRecords in the Amazon Kinesis\n Data Streams Developer Guide.\n The PutRecords response includes an array of response\n Records. Each record in the response array directly correlates with a\n record in the request array using natural ordering, from the top to the bottom of the\n request and response. The response Records array always includes the same\n number of records as the request array.\n The response Records array includes both successfully and unsuccessfully\n processed records. Kinesis Data Streams attempts to process all records in each\n PutRecords request. A single record failure does not stop the\n processing of subsequent records. As a result, PutRecords doesn't guarantee the ordering\n of records. If you need to read records in the same order they are written to the\n stream, use PutRecord instead of PutRecords, and write to\n the same shard.\n A successfully processed record includes ShardId and\n SequenceNumber values. The ShardId parameter identifies\n the shard in the stream where the record is stored. The SequenceNumber\n parameter is an identifier assigned to the put record, unique to all records in the\n stream.\n An unsuccessfully processed record includes ErrorCode and\n ErrorMessage values. ErrorCode reflects the type of error\n and can be one of the following values:\n ProvisionedThroughputExceededException or InternalFailure.\n ErrorMessage provides more detailed information about the\n ProvisionedThroughputExceededException exception including the account\n ID, stream name, and shard ID of the record that was throttled. For more information\n about partially successful responses, see Adding Multiple Records with PutRecords in the Amazon Kinesis\n Data Streams Developer Guide.\n After you write a record to a stream, you cannot modify that record or its order\n within the stream.\n By default, data records are accessible for 24 hours from the time that they are added\n to a stream. You can use IncreaseStreamRetentionPeriod or DecreaseStreamRetentionPeriod to modify this retention period."
      }
    },

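As a rough sketch of a batched write with the v3 client, the following might be used; the region, stream name, and payloads are placeholders. The response entries line up with the request entries by position, so FailedRecordCount and the per-entry fields can be inspected directly.

import { KinesisClient, PutRecordsCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

async function putBatch(payloads: string[]): Promise<void> {
  const response = await client.send(
    new PutRecordsCommand({
      StreamName: "example-stream", // placeholder stream
      Records: payloads.map((payload, i) => ({
        Data: new TextEncoder().encode(payload),
        PartitionKey: `key-${i % 8}`, // spread records across shards
      })),
    })
  );
  // Response entries correlate with the request entries by natural ordering.
  console.log(`Failed records: ${response.FailedRecordCount ?? 0}`);
}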
    "com.amazonaws.kinesis#PutRecordsInput": {
@@ -2035,14 +2091,14 @@
        "Records": {
          "target": "com.amazonaws.kinesis#PutRecordsResultEntryList",
          "traits": {
-            "smithy.api#documentation": "An array of successfully and unsuccessfully processed record results, correlated\n with the request by natural ordering. A record that is successfully added to a stream\n includes SequenceNumber and ShardId in the result. A record\n that fails to be added to a stream includes ErrorCode and\n ErrorMessage in the result.",
+            "smithy.api#documentation": "An array of successfully and unsuccessfully processed record results. A record that is\n successfully added to a stream includes SequenceNumber and\n ShardId in the result. A record that fails to be added to a stream\n includes ErrorCode and ErrorMessage in the result.",
            "smithy.api#required": {}
          }
        },
        "EncryptionType": {
          "target": "com.amazonaws.kinesis#EncryptionType",
          "traits": {
-            "smithy.api#documentation": "The encryption type used on the records. This parameter can be one of the following\n values:\n • NONE: Do not encrypt the records.\n • KMS: Use server-side encryption on the records using a\n customer-managed AWS KMS key."
+            "smithy.api#documentation": "The encryption type used on the records. This parameter can be one of the following\n values:\n • NONE: Do not encrypt the records.\n • KMS: Use server-side encryption on the records using a\n customer-managed Amazon Web Services KMS key."
          }
        }
      },
@@ -2063,7 +2119,7 @@
        "ExplicitHashKey": {
          "target": "com.amazonaws.kinesis#HashKey",
          "traits": {
-            "smithy.api#documentation": "The hash value used to determine explicitly the shard that the data record is\n assigned to by overriding the partition key hash."
+            "smithy.api#documentation": "The hash value used to determine explicitly the shard that the data record is assigned\n to by overriding the partition key hash."
          }
        },
        "PartitionKey": {
@@ -2108,18 +2164,18 @@
        "ErrorCode": {
          "target": "com.amazonaws.kinesis#ErrorCode",
          "traits": {
-            "smithy.api#documentation": "The error code for an individual record result. ErrorCodes can be\n either ProvisionedThroughputExceededException or\n InternalFailure."
+            "smithy.api#documentation": "The error code for an individual record result. ErrorCodes can be either\n ProvisionedThroughputExceededException or\n InternalFailure."
          }
        },
        "ErrorMessage": {
          "target": "com.amazonaws.kinesis#ErrorMessage",
          "traits": {
-            "smithy.api#documentation": "The error message for an individual record result. An ErrorCode value\n of ProvisionedThroughputExceededException has an error message that\n includes the account ID, stream name, and shard ID. An ErrorCode value of\n InternalFailure has the error message \"Internal Service\n Failure\"."
+            "smithy.api#documentation": "The error message for an individual record result. An ErrorCode value of\n ProvisionedThroughputExceededException has an error message that\n includes the account ID, stream name, and shard ID. An ErrorCode value of\n InternalFailure has the error message \"Internal Service\n Failure\"."
          }
        }
      },
      "traits": {
-        "smithy.api#documentation": "Represents the result of an individual record from a PutRecords\n request. A record that is successfully added to a stream includes\n SequenceNumber and ShardId in the result. A record that\n fails to be added to the stream includes ErrorCode and\n ErrorMessage in the result."
+        "smithy.api#documentation": "Represents the result of an individual record from a PutRecords request.\n A record that is successfully added to a stream includes SequenceNumber and\n ShardId in the result. A record that fails to be added to the stream\n includes ErrorCode and ErrorMessage in the result."
      }
    },

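Building on the result-entry fields documented above, one plausible way to collect the entries that need to be re-sent is sketched here; the helper name is hypothetical, and a real producer would add backoff before retrying throttled records.

import { PutRecordsRequestEntry, PutRecordsResultEntry } from "@aws-sdk/client-kinesis";

// Hypothetical helper: pair each request entry with its result entry (same index)
// and keep the ones that came back with an ErrorCode such as
// ProvisionedThroughputExceededException or InternalFailure.
function entriesToRetry(
  requested: PutRecordsRequestEntry[],
  results: PutRecordsResultEntry[]
): PutRecordsRequestEntry[] {
  return requested.filter((_, i) => results[i]?.ErrorCode !== undefined);
}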
    "com.amazonaws.kinesis#PutRecordsResultEntryList": {
@@ -2167,12 +2223,12 @@
        "EncryptionType": {
          "target": "com.amazonaws.kinesis#EncryptionType",
          "traits": {
-            "smithy.api#documentation": "The encryption type used on the record. This parameter can be one of the following\n values:\n • NONE: Do not encrypt the records in the stream.\n • KMS: Use server-side encryption on the records in the stream\n using a customer-managed AWS KMS key."
+            "smithy.api#documentation": "The encryption type used on the record. This parameter can be one of the following\n values:\n • NONE: Do not encrypt the records in the stream.\n • KMS: Use server-side encryption on the records in the stream\n using a customer-managed Amazon Web Services KMS key."
          }
        }
      },
      "traits": {
-        "smithy.api#documentation": "The unit of data of the Kinesis data stream, which is composed of a sequence\n number, a partition key, and a data blob."
+        "smithy.api#documentation": "The unit of data of the Kinesis data stream, which is composed of a sequence number, a\n partition key, and a data blob."
      }
    },
    "com.amazonaws.kinesis#RecordList": {
@@ -2213,7 +2269,7 @@
        "StreamARN": {
          "target": "com.amazonaws.kinesis#StreamARN",
          "traits": {
-            "smithy.api#documentation": "The ARN of the Kinesis data stream that you want to register the consumer with. For\n more info, see Amazon Resource Names (ARNs) and AWS Service Namespaces.",
+            "smithy.api#documentation": "The ARN of the Kinesis data stream that you want to register the consumer with. For\n more info, see Amazon Resource Names (ARNs) and Amazon Web Services Service\n Namespaces.",
            "smithy.api#required": {}
          }
        },
@@ -2333,7 +2389,7 @@
    "com.amazonaws.kinesis#SequenceNumber": {
      "type": "string",
      "traits": {
-        "smithy.api#pattern": "0|([1-9]\\d{0,128})"
+        "smithy.api#pattern": "^0|([1-9]\\d{0,128})$"
      }
    },
    "com.amazonaws.kinesis#SequenceNumberRange": {
@@ -2414,15 +2470,25 @@
        "Type": {
          "target": "com.amazonaws.kinesis#ShardFilterType",
          "traits": {

+            "smithy.api#documentation": "The shard type specified in the ShardFilter parameter. This is a required\n property of the ShardFilter parameter.\n You can specify the following valid values:\n • AFTER_SHARD_ID - the response includes all the shards, starting\n with the shard whose ID immediately follows the ShardId that you\n provided.\n • AT_TRIM_HORIZON - the response includes all the shards that were\n open at TRIM_HORIZON.\n • FROM_TRIM_HORIZON - (default), the response includes all the\n shards within the retention period of the data stream (trim to tip).\n • AT_LATEST - the response includes only the currently open shards\n of the data stream.\n • AT_TIMESTAMP - the response includes all shards whose start\n timestamp is less than or equal to the given timestamp and end timestamp is\n greater than or equal to the given timestamp or still open.\n • FROM_TIMESTAMP - the response includes all closed shards whose\n end timestamp is greater than or equal to the given timestamp and also all open\n shards. Corrected to TRIM_HORIZON of the data stream if\n FROM_TIMESTAMP is less than the TRIM_HORIZON\n value.",
            "smithy.api#required": {}
          }
        },
        "ShardId": {
-          "target": "com.amazonaws.kinesis#ShardId"
+          "target": "com.amazonaws.kinesis#ShardId",
+          "traits": {
+            "smithy.api#documentation": "The exclusive start shard ID specified in the ShardFilter\n parameter. This property can only be used if the AFTER_SHARD_ID shard type\n is specified."
+          }
        },
        "Timestamp": {
-          "target": "com.amazonaws.kinesis#Timestamp"
+          "target": "com.amazonaws.kinesis#Timestamp",
+          "traits": {
+            "smithy.api#documentation": "The timestamp specified in the ShardFilter parameter. A timestamp is a\n Unix epoch date with precision in milliseconds. For example,\n 2016-04-04T19:58:46.480-00:00 or 1459799926.480. This property can only be used if\n FROM_TIMESTAMP or AT_TIMESTAMP shard types are\n specified."
+          }
        }
      },
+      "traits": {
+        "smithy.api#documentation": "The request parameter used to filter out the response of the ListShards\n API."
+      }
    },

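To illustrate how these filter types surface in practice, a small sketch using ListShards with a ShardFilter follows; the stream name and region are placeholders, and the timestamp is an arbitrary example value.

import { KinesisClient, ListShardsCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

async function listShards(): Promise<void> {
  // Only the shards that are currently open.
  const open = await client.send(
    new ListShardsCommand({
      StreamName: "example-stream", // placeholder stream
      ShardFilter: { Type: "AT_LATEST" },
    })
  );

  // All open shards plus closed shards whose end timestamp is at or after the given time.
  const since = await client.send(
    new ListShardsCommand({
      StreamName: "example-stream",
      ShardFilter: { Type: "FROM_TIMESTAMP", Timestamp: new Date("2021-11-01T00:00:00Z") },
    })
  );

  console.log(open.Shards?.length, since.Shards?.length);
}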
    "com.amazonaws.kinesis#ShardFilterType": {
@@ -2463,7 +2529,7 @@
          "min": 1,
          "max": 128
        },
-        "smithy.api#pattern": "[a-zA-Z0-9_.-]+"
+        "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$"
      }
    },
    "com.amazonaws.kinesis#ShardIdList": {
@@ -2531,10 +2597,13 @@
        },
        {
          "target": "com.amazonaws.kinesis#ResourceNotFoundException"
+        },
+        {
+          "target": "com.amazonaws.kinesis#ValidationException"
        }
      ],
      "traits": {
-        "smithy.api#documentation": "Splits a shard into two new shards in the Kinesis data stream, to increase the\n stream's capacity to ingest and transport data. SplitShard is called when\n there is a need to increase the overall capacity of a stream because of an expected\n increase in the volume of data records being ingested.\n You can also use SplitShard when a shard appears to be approaching its\n maximum utilization; for example, the producers sending data into the specific shard are\n suddenly sending more than previously anticipated. You can also call\n SplitShard to increase stream capacity, so that more Kinesis Data\n Streams applications can simultaneously read data from the stream for real-time\n processing.\n You must specify the shard to be split and the new hash key, which is the position\n in the shard where the shard gets split in two. In many cases, the new hash key might be\n the average of the beginning and ending hash key, but it can be any hash key value in\n the range being mapped into the shard. For more information, see Split a\n Shard in the Amazon Kinesis Data Streams Developer\n Guide.\n You can use DescribeStream to determine the shard ID and hash key\n values for the ShardToSplit and NewStartingHashKey parameters\n that are specified in the SplitShard request.\n SplitShard is an asynchronous operation. Upon receiving a\n SplitShard request, Kinesis Data Streams immediately returns a response\n and sets the stream status to UPDATING. After the operation is completed,\n Kinesis Data Streams sets the stream status to ACTIVE. Read and write\n operations continue to work while the stream is in the UPDATING state.\n You can use DescribeStream to check the status of the stream, which is\n returned in StreamStatus. If the stream is in the ACTIVE\n state, you can call SplitShard. If a stream is in CREATING or\n UPDATING or DELETING states, DescribeStream\n returns a ResourceInUseException.\n If the specified stream does not exist, DescribeStream returns a\n ResourceNotFoundException. If you try to create more shards than are\n authorized for your account, you receive a LimitExceededException.\n For the default shard limit for an AWS account, see Kinesis Data Streams\n Limits in the Amazon Kinesis Data Streams Developer\n Guide. To increase this limit, contact AWS\n Support.\n If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a\n LimitExceededException.\n SplitShard has a limit of five transactions per second per\n account."
+        "smithy.api#documentation": "Splits a shard into two new shards in the Kinesis data stream, to increase the\n stream's capacity to ingest and transport data. SplitShard is called when\n there is a need to increase the overall capacity of a stream because of an expected\n increase in the volume of data records being ingested.\n You can also use SplitShard when a shard appears to be approaching its\n maximum utilization; for example, the producers sending data into the specific shard are\n suddenly sending more than previously anticipated. You can also call\n SplitShard to increase stream capacity, so that more Kinesis Data\n Streams applications can simultaneously read data from the stream for real-time\n processing.\n You must specify the shard to be split and the new hash key, which is the position in\n the shard where the shard gets split in two. In many cases, the new hash key might be\n the average of the beginning and ending hash key, but it can be any hash key value in\n the range being mapped into the shard. For more information, see Split a\n Shard in the Amazon Kinesis Data Streams Developer\n Guide.\n You can use DescribeStreamSummary and the ListShards APIs to determine the shard ID and hash key values for the ShardToSplit\n and NewStartingHashKey parameters that are specified in the\n SplitShard request.\n SplitShard is an asynchronous operation. Upon receiving a\n SplitShard request, Kinesis Data Streams immediately returns a response\n and sets the stream status to UPDATING. After the operation is completed,\n Kinesis Data Streams sets the stream status to ACTIVE. Read and write\n operations continue to work while the stream is in the UPDATING state.\n You can use DescribeStreamSummary to check the status of the stream,\n which is returned in StreamStatus. If the stream is in the\n ACTIVE state, you can call SplitShard.\n If the specified stream does not exist, DescribeStreamSummary\n returns a ResourceNotFoundException. If you try to create more shards than\n are authorized for your account, you receive a LimitExceededException.\n For the default shard limit for an Amazon Web Services account, see Kinesis\n Data Streams Limits in the Amazon Kinesis Data Streams Developer\n Guide. To increase this limit, contact Amazon Web Services\n Support.\n If you try to operate on too many streams simultaneously using CreateStream, DeleteStream, MergeShards, and/or SplitShard, you receive a\n LimitExceededException.\n SplitShard has a limit of five transactions per second per account."
      }
    },

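As a rough illustration of the workflow described above, the sketch below looks up an open shard with ListShards and splits it at the midpoint of its hash key range; the stream name and region are placeholders, and a real caller would pick the shard to split deliberately.

import { KinesisClient, ListShardsCommand, SplitShardCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

async function splitFirstOpenShard(streamName: string): Promise<void> {
  const { Shards } = await client.send(
    new ListShardsCommand({ StreamName: streamName, ShardFilter: { Type: "AT_LATEST" } })
  );
  const shard = Shards?.[0];
  if (!shard?.ShardId || !shard.HashKeyRange) return;

  // New starting hash key at the midpoint of the shard's hash key range.
  const start = BigInt(shard.HashKeyRange.StartingHashKey ?? "0");
  const end = BigInt(shard.HashKeyRange.EndingHashKey ?? "0");
  const midpoint = ((start + end) / BigInt(2)).toString();

  await client.send(
    new SplitShardCommand({
      StreamName: streamName,
      ShardToSplit: shard.ShardId,
      NewStartingHashKey: midpoint,
    })
  );
}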
    "com.amazonaws.kinesis#SplitShardInput": {
@@ -2557,7 +2626,7 @@
        "NewStartingHashKey": {
          "target": "com.amazonaws.kinesis#HashKey",
          "traits": {
-            "smithy.api#documentation": "A hash key value for the starting hash key of one of the child shards created by\n the split. The hash key range for a given shard constitutes a set of ordered contiguous\n positive integers. The value for NewStartingHashKey must be in the range of\n hash keys being mapped into the shard. The NewStartingHashKey hash key\n value and all higher hash key values in hash key range are distributed to one of the\n child shards. All the lower hash key values in the range are distributed to the other\n child shard.",
+            "smithy.api#documentation": "A hash key value for the starting hash key of one of the child shards created by the\n split. The hash key range for a given shard constitutes a set of ordered contiguous\n positive integers. The value for NewStartingHashKey must be in the range of\n hash keys being mapped into the shard. The NewStartingHashKey hash key\n value and all higher hash key values in hash key range are distributed to one of the\n child shards. All the lower hash key values in the range are distributed to the other\n child shard.",
            "smithy.api#required": {}
          }
        }
@@ -2604,7 +2673,7 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "Enables or updates server-side encryption using an AWS KMS key for a specified\n stream.\n Starting encryption is an asynchronous operation. Upon receiving the request,\n Kinesis Data Streams returns immediately and sets the status of the stream to\n UPDATING. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE. Updating or applying encryption\n normally takes a few seconds to complete, but it can take minutes. You can continue to\n read and write data to your stream while its status is UPDATING. Once the\n status of the stream is ACTIVE, encryption begins for records written to\n the stream.\n API Limits: You can successfully apply a new AWS KMS key for server-side encryption\n 25 times in a rolling 24-hour period.\n Note: It can take up to 5 seconds after the stream is in an ACTIVE\n status before all records written to the stream are encrypted. After you enable\n encryption, you can verify that encryption is applied by inspecting the API response\n from PutRecord or PutRecords."
+        "smithy.api#documentation": "Enables or updates server-side encryption using an Amazon Web Services KMS key for a\n specified stream.\n Starting encryption is an asynchronous operation. Upon receiving the request, Kinesis\n Data Streams returns immediately and sets the status of the stream to\n UPDATING. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE. Updating or applying encryption\n normally takes a few seconds to complete, but it can take minutes. You can continue to\n read and write data to your stream while its status is UPDATING. Once the\n status of the stream is ACTIVE, encryption begins for records written to\n the stream.\n API Limits: You can successfully apply a new Amazon Web Services KMS key for\n server-side encryption 25 times in a rolling 24-hour period.\n Note: It can take up to 5 seconds after the stream is in an ACTIVE status\n before all records written to the stream are encrypted. After you enable encryption, you\n can verify that encryption is applied by inspecting the API response from\n PutRecord or PutRecords."
      }
    },

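For context, enabling encryption through the v3 client can be sketched as follows; the region and stream name are placeholders, and the Kinesis-owned alias is used here in place of a customer-managed key ARN or key ID.

import { KinesisClient, StartStreamEncryptionCommand } from "@aws-sdk/client-kinesis";

const client = new KinesisClient({ region: "us-west-2" }); // placeholder region

async function enableEncryption(): Promise<void> {
  await client.send(
    new StartStreamEncryptionCommand({
      StreamName: "example-stream", // placeholder stream
      EncryptionType: "KMS",        // server-side encryption with a KMS key
      KeyId: "alias/aws/kinesis",   // master key owned by Kinesis Data Streams
    })
  );
  // The stream status moves to UPDATING and back to ACTIVE once encryption is applied.
}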
    "com.amazonaws.kinesis#StartStreamEncryptionInput": {
@@ -2627,7 +2696,7 @@
        "KeyId": {
          "target": "com.amazonaws.kinesis#KeyId",
          "traits": {
-            "smithy.api#documentation": "The GUID for the customer-managed AWS KMS key to use for encryption. This value can\n be a globally unique identifier, a fully specified Amazon Resource Name (ARN) to either\n an alias or a key, or an alias name prefixed by \"alias/\". You can also use a master key\n owned by Kinesis Data Streams by specifying the alias\n aws/kinesis.\n • Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012\n • Alias ARN example:\n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName\n • Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012\n • Alias name example: alias/MyAliasName\n • Master key owned by Kinesis Data Streams:\n alias/aws/kinesis",
+            "smithy.api#documentation": "The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified Amazon Resource Name\n (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\". You can also\n use a master key owned by Kinesis Data Streams by specifying the alias\n aws/kinesis.\n • Key ARN example:\n arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012\n • Alias ARN example:\n arn:aws:kms:us-east-1:123456789012:alias/MyAliasName\n • Globally unique key ID example:\n 12345678-1234-1234-1234-123456789012\n • Alias name example: alias/MyAliasName\n • Master key owned by Kinesis Data Streams:\n alias/aws/kinesis",
            "smithy.api#required": {}
          }
        }
@@ -2657,7 +2726,7 @@
        }
      },
      "traits": {
-        "smithy.api#documentation": ""
+        "smithy.api#documentation": "The starting position in the data stream from which to start streaming."
      }
    },
    "com.amazonaws.kinesis#StopStreamEncryption": {
@@ -2680,7 +2749,7 @@
        }
      ],
      "traits": {

                                                                      Disables server-side encryption for a specified stream.

                                                                      \n

                                                                      Stopping encryption is an asynchronous operation. Upon receiving the request,\n Kinesis Data Streams returns immediately and sets the status of the stream to\n UPDATING. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE. Stopping encryption normally takes a\n few seconds to complete, but it can take minutes. You can continue to read and write\n data to your stream while its status is UPDATING. Once the status of the\n stream is ACTIVE, records written to the stream are no longer encrypted by\n Kinesis Data Streams.

                                                                      \n

                                                                      API Limits: You can successfully disable server-side encryption 25 times in a\n rolling 24-hour period.

                                                                      \n

                                                                      Note: It can take up to 5 seconds after the stream is in an ACTIVE\n status before all records written to the stream are no longer subject to encryption.\n After you disabled encryption, you can verify that encryption is not applied by\n inspecting the API response from PutRecord or\n PutRecords.

                                                                      " + "smithy.api#documentation": "

                                                                      Disables server-side encryption for a specified stream.

                                                                      \n

                                                                      Stopping encryption is an asynchronous operation. Upon receiving the request, Kinesis\n Data Streams returns immediately and sets the status of the stream to\n UPDATING. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE. Stopping encryption normally takes a\n few seconds to complete, but it can take minutes. You can continue to read and write\n data to your stream while its status is UPDATING. Once the status of the\n stream is ACTIVE, records written to the stream are no longer encrypted by\n Kinesis Data Streams.

                                                                      \n

                                                                      API Limits: You can successfully disable server-side encryption 25 times in a rolling\n 24-hour period.

                                                                      \n

                                                                      Note: It can take up to 5 seconds after the stream is in an ACTIVE status\n before all records written to the stream are no longer subject to encryption. After you\n disabled encryption, you can verify that encryption is not applied by inspecting the API\n response from PutRecord or PutRecords.

                                                                      " } }, "com.amazonaws.kinesis#StopStreamEncryptionInput": { @@ -2703,7 +2772,7 @@ "KeyId": { "target": "com.amazonaws.kinesis#KeyId", "traits": { - "smithy.api#documentation": "

                                                                      The GUID for the customer-managed AWS KMS key to use for encryption. This value can\n be a globally unique identifier, a fully specified Amazon Resource Name (ARN) to either\n an alias or a key, or an alias name prefixed by \"alias/\".You can also use a master key\n owned by Kinesis Data Streams by specifying the alias\n aws/kinesis.

                                                                      • Key ARN example: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
                                                                      • Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
                                                                      • Globally unique key ID example: 12345678-1234-1234-1234-123456789012
                                                                      • Alias name example: alias/MyAliasName
                                                                      • Master key owned by Kinesis Data Streams: alias/aws/kinesis
                                                                      ", + "smithy.api#documentation": "

                                                                      The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified Amazon Resource Name\n (ARN) to either an alias or a key, or an alias name prefixed by \"alias/\". You can also\n use a master key owned by Kinesis Data Streams by specifying the alias\n aws/kinesis.

                                                                      • Key ARN example: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
                                                                      • Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
                                                                      • Globally unique key ID example: 12345678-1234-1234-1234-123456789012
                                                                      • Alias name example: alias/MyAliasName
                                                                      • Master key owned by Kinesis Data Streams: alias/aws/kinesis
                                                                      ", "smithy.api#required": {} } } @@ -2716,7 +2785,7 @@ "min": 1, "max": 2048 }, - "smithy.api#pattern": "arn:aws.*:kinesis:.*:\\d{12}:stream/.+" + "smithy.api#pattern": "^arn:aws.*:kinesis:.*:\\d{12}:stream/.+$" } }, "com.amazonaws.kinesis#StreamDescription": { @@ -2743,6 +2812,12 @@ "smithy.api#required": {} } }, + "StreamModeDetails": { + "target": "com.amazonaws.kinesis#StreamModeDetails", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the capacity mode to which you want to set your data stream. Currently, in\n Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                                                                      " + } + }, "Shards": { "target": "com.amazonaws.kinesis#ShardList", "traits": { @@ -2781,13 +2856,13 @@ "EncryptionType": { "target": "com.amazonaws.kinesis#EncryptionType", "traits": { - "smithy.api#documentation": "

                                                                      The server-side encryption type used on the stream. This parameter can be one of\n the following values:

                                                                      • NONE: Do not encrypt the records in the stream.
                                                                      • KMS: Use server-side encryption on the records in the stream using a customer-managed AWS KMS key.
                                                                      " + "smithy.api#documentation": "

                                                                      The server-side encryption type used on the stream. This parameter can be one of the\n following values:

                                                                      • NONE: Do not encrypt the records in the stream.
                                                                      • KMS: Use server-side encryption on the records in the stream using a customer-managed Amazon Web Services KMS key.
                                                                      " } }, "KeyId": { "target": "com.amazonaws.kinesis#KeyId", "traits": { - "smithy.api#documentation": "

                                                                      The GUID for the customer-managed AWS KMS key to use for encryption. This value can\n be a globally unique identifier, a fully specified ARN to either an alias or a key, or\n an alias name prefixed by \"alias/\".You can also use a master key owned by Kinesis Data\n Streams by specifying the alias aws/kinesis.

                                                                      • Key ARN example: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
                                                                      • Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
                                                                      • Globally unique key ID example: 12345678-1234-1234-1234-123456789012
                                                                      • Alias name example: alias/MyAliasName
                                                                      • Master key owned by Kinesis Data Streams: alias/aws/kinesis
                                                                      " + "smithy.api#documentation": "

                                                                      The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified ARN to either an alias\n or a key, or an alias name prefixed by \"alias/\". You can also use a master key owned by\n Kinesis Data Streams by specifying the alias aws/kinesis.

                                                                      • Key ARN example: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
                                                                      • Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
                                                                      • Globally unique key ID example: 12345678-1234-1234-1234-123456789012
                                                                      • Alias name example: alias/MyAliasName
                                                                      • Master key owned by Kinesis Data Streams: alias/aws/kinesis
                                                                      " } } }, @@ -2819,6 +2894,12 @@ "smithy.api#required": {} } }, + "StreamModeDetails": { + "target": "com.amazonaws.kinesis#StreamModeDetails", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the capacity mode to which you want to set your data stream. Currently, in\n Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                                                                      " + } + }, "RetentionPeriodHours": { "target": "com.amazonaws.kinesis#RetentionPeriodHours", "traits": { @@ -2849,7 +2930,7 @@ "KeyId": { "target": "com.amazonaws.kinesis#KeyId", "traits": { - "smithy.api#documentation": "

                                                                      The GUID for the customer-managed AWS KMS key to use for encryption. This value can\n be a globally unique identifier, a fully specified ARN to either an alias or a key, or\n an alias name prefixed by \"alias/\".You can also use a master key owned by Kinesis Data\n Streams by specifying the alias aws/kinesis.

                                                                      • Key ARN example: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
                                                                      • Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
                                                                      • Globally unique key ID example: 12345678-1234-1234-1234-123456789012
                                                                      • Alias name example: alias/MyAliasName
                                                                      • Master key owned by Kinesis Data Streams: alias/aws/kinesis
                                                                      " + "smithy.api#documentation": "

                                                                      The GUID for the customer-managed Amazon Web Services KMS key to use for encryption.\n This value can be a globally unique identifier, a fully specified ARN to either an alias\n or a key, or an alias name prefixed by \"alias/\". You can also use a master key owned by\n Kinesis Data Streams by specifying the alias aws/kinesis.

                                                                      • Key ARN example: arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012
                                                                      • Alias ARN example: arn:aws:kms:us-east-1:123456789012:alias/MyAliasName
                                                                      • Globally unique key ID example: 12345678-1234-1234-1234-123456789012
                                                                      • Alias name example: alias/MyAliasName
                                                                      • Master key owned by Kinesis Data Streams: alias/aws/kinesis
                                                                      " } }, "OpenShardCount": { @@ -2870,6 +2951,36 @@ "smithy.api#documentation": "

                                                                      Represents the output for DescribeStreamSummary\n

                                                                      " } }, + "com.amazonaws.kinesis#StreamMode": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PROVISIONED", + "name": "PROVISIONED" + }, + { + "value": "ON_DEMAND", + "name": "ON_DEMAND" + } + ] + } + }, + "com.amazonaws.kinesis#StreamModeDetails": { + "type": "structure", + "members": { + "StreamMode": { + "target": "com.amazonaws.kinesis#StreamMode", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the capacity mode to which you want to set your data stream. Currently, in\n Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the capacity mode to which you want to set your data stream. Currently, in\n Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                                                                      " + } + }, "com.amazonaws.kinesis#StreamName": { "type": "string", "traits": { @@ -2877,7 +2988,7 @@ "min": 1, "max": 128 }, - "smithy.api#pattern": "[a-zA-Z0-9_.-]+" + "smithy.api#pattern": "^[a-zA-Z0-9_.-]+$" } }, "com.amazonaws.kinesis#StreamNameList": { @@ -2932,7 +3043,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      This operation establishes an HTTP/2 connection between the consumer you specify in\n the ConsumerARN parameter and the shard you specify in the\n ShardId parameter. After the connection is successfully established,\n Kinesis Data Streams pushes records from the shard to the consumer over this connection.\n Before you call this operation, call RegisterStreamConsumer to\n register the consumer with Kinesis Data Streams.

                                                                      \n

                                                                      When the SubscribeToShard call succeeds, your consumer starts receiving\n events of type SubscribeToShardEvent over the HTTP/2 connection for up\n to 5 minutes, after which time you need to call SubscribeToShard again to\n renew the subscription if you want to continue to receive records.

                                                                      \n

                                                                      You can make one call to SubscribeToShard per second per registered\n consumer per shard. For example, if you have a 4000 shard stream and two registered\n stream consumers, you can make one SubscribeToShard request per second for\n each combination of shard and registered consumer, allowing you to subscribe both\n consumers to all 4000 shards in one second.

                                                                      \n

                                                                      If you call SubscribeToShard again with the same ConsumerARN\n and ShardId within 5 seconds of a successful call, you'll get a\n ResourceInUseException. If you call SubscribeToShard 5\n seconds or more after a successful call, the first connection will expire and the second\n call will take over the subscription.

                                                                      \n

                                                                      For an example of how to use this operations, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.

                                                                      " + "smithy.api#documentation": "

                                                                      This operation establishes an HTTP/2 connection between the consumer you specify in\n the ConsumerARN parameter and the shard you specify in the\n ShardId parameter. After the connection is successfully established,\n Kinesis Data Streams pushes records from the shard to the consumer over this connection.\n Before you call this operation, call RegisterStreamConsumer to\n register the consumer with Kinesis Data Streams.

                                                                      \n

                                                                      When the SubscribeToShard call succeeds, your consumer starts receiving\n events of type SubscribeToShardEvent over the HTTP/2 connection for up\n to 5 minutes, after which time you need to call SubscribeToShard again to\n renew the subscription if you want to continue to receive records.

                                                                      \n

                                                                      You can make one call to SubscribeToShard per second per registered\n consumer per shard. For example, if you have a 4000 shard stream and two registered\n stream consumers, you can make one SubscribeToShard request per second for\n each combination of shard and registered consumer, allowing you to subscribe both\n consumers to all 4000 shards in one second.

                                                                      \n

                                                                      If you call SubscribeToShard again with the same ConsumerARN\n and ShardId within 5 seconds of a successful call, you'll get a\n ResourceInUseException. If you call SubscribeToShard 5\n seconds or more after a successful call, the second call takes over the subscription and\n the previous connection expires or fails with a\n ResourceInUseException.

                                                                      \n

                                                                      For an example of how to use this operation, see Enhanced Fan-Out\n Using the Kinesis Data Streams API.

                                                                      " } }, "com.amazonaws.kinesis#SubscribeToShardEvent": { @@ -2960,7 +3071,10 @@ } }, "ChildShards": { - "target": "com.amazonaws.kinesis#ChildShardList" + "target": "com.amazonaws.kinesis#ChildShardList", + "traits": { + "smithy.api#documentation": "

                                                                      The list of the child shards of the current shard, returned only at the end of the\n current shard.

                                                                      " + } } }, "traits": { @@ -3032,7 +3146,7 @@ "StartingPosition": { "target": "com.amazonaws.kinesis#StartingPosition", "traits": { - "smithy.api#documentation": "

                                                                      ", + "smithy.api#documentation": "

                                                                      The starting position in the data stream from which to start streaming.

                                                                      ", "smithy.api#required": {} } } @@ -3063,7 +3177,7 @@ "Value": { "target": "com.amazonaws.kinesis#TagValue", "traits": { - "smithy.api#documentation": "

                                                                      An optional string, typically used to describe or define the tag. Maximum length:\n 256 characters. Valid characters: Unicode letters, digits, white space, _ . / = + - %\n @

                                                                      " + "smithy.api#documentation": "

                                                                      An optional string, typically used to describe or define the tag. Maximum length: 256\n characters. Valid characters: Unicode letters, digits, white space, _ . / = + - %\n @

                                                                      " } } }, @@ -3151,10 +3265,13 @@ }, { "target": "com.amazonaws.kinesis#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.kinesis#ValidationException" } ], "traits": { - "smithy.api#documentation": "

                                                                      Updates the shard count of the specified stream to the specified number of\n shards.

                                                                      \n

                                                                      Updating the shard count is an asynchronous operation. Upon receiving the request,\n Kinesis Data Streams returns immediately and sets the status of the stream to\n UPDATING. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE. Depending on the size of the stream,\n the scaling action could take a few minutes to complete. You can continue to read and\n write data to your stream while its status is UPDATING.

                                                                      \n

                                                                      To update the shard count, Kinesis Data Streams performs splits or merges on\n individual shards. This can cause short-lived shards to be created, in addition to the\n final shards. These short-lived shards count towards your total shard limit for your\n account in the Region.

                                                                      \n

                                                                      When using this operation, we recommend that you specify a target shard count that\n is a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your\n shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling\n action might take longer to complete.

                                                                      \n

                                                                      This operation has the following default limits. By default, you cannot do the\n following:

                                                                      • Scale more than ten times per rolling 24-hour period per stream
                                                                      • Scale up to more than double your current shard count for a stream
                                                                      • Scale down below half your current shard count for a stream
                                                                      • Scale up to more than 500 shards in a stream
                                                                      • Scale a stream with more than 500 shards down unless the result is less than 500 shards
                                                                      • Scale up to more than the shard limit for your account

                                                                      For the default limits for an AWS account, see Streams Limits in the\n Amazon Kinesis Data Streams Developer Guide. To request an\n increase in the call rate limit, the shard limit for this API, or your overall shard\n limit, use the limits form.

                                                                      " + "smithy.api#documentation": "

                                                                      Updates the shard count of the specified stream to the specified number of\n shards.

                                                                      \n

                                                                      Updating the shard count is an asynchronous operation. Upon receiving the request,\n Kinesis Data Streams returns immediately and sets the status of the stream to\n UPDATING. After the update is complete, Kinesis Data Streams sets the\n status of the stream back to ACTIVE. Depending on the size of the stream,\n the scaling action could take a few minutes to complete. You can continue to read and\n write data to your stream while its status is UPDATING.

                                                                      \n

                                                                      To update the shard count, Kinesis Data Streams performs splits or merges on\n individual shards. This can cause short-lived shards to be created, in addition to the\n final shards. These short-lived shards count towards your total shard limit for your\n account in the Region.

                                                                      \n

                                                                      When using this operation, we recommend that you specify a target shard count that is\n a multiple of 25% (25%, 50%, 75%, 100%). You can specify any target value within your\n shard limit. However, if you specify a target that isn't a multiple of 25%, the scaling\n action might take longer to complete.

                                                                      \n

                                                                      This operation has the following default limits. By default, you cannot do the\n following:

                                                                      • Scale more than ten times per rolling 24-hour period per stream
                                                                      • Scale up to more than double your current shard count for a stream
                                                                      • Scale down below half your current shard count for a stream
                                                                      • Scale up to more than 10000 shards in a stream
                                                                      • Scale a stream with more than 10000 shards down unless the result is less than 10000 shards
                                                                      • Scale up to more than the shard limit for your account

                                                                      For the default limits for an Amazon Web Services account, see Streams\n Limits in the Amazon Kinesis Data Streams Developer\n Guide. To request an increase in the call rate limit, the shard limit for\n this API, or your overall shard limit, use the limits form.

                                                                      " } }, "com.amazonaws.kinesis#UpdateShardCountInput": { @@ -3170,7 +3287,7 @@ "TargetShardCount": { "target": "com.amazonaws.kinesis#PositiveIntegerObject", "traits": { - "smithy.api#documentation": "

                                                                      The new number of shards. This value has the following default limits. By default,\n you cannot do the following:

                                                                      • Set this value to more than double your current shard count for a stream.
                                                                      • Set this value below half your current shard count for a stream.
                                                                      • Set this value to more than 500 shards in a stream (the default limit for shard count per stream is 500 per account per region), unless you request a limit increase.
                                                                      • Scale a stream with more than 500 shards down unless you set this value to less than 500 shards.
                                                                      ", + "smithy.api#documentation": "

                                                                      The new number of shards. This value has the following default limits. By default, you\n cannot do the following:

                                                                      • Set this value to more than double your current shard count for a stream.
                                                                      • Set this value below half your current shard count for a stream.
                                                                      • Set this value to more than 10000 shards in a stream (the default limit for shard count per stream is 10000 per account per region), unless you request a limit increase.
                                                                      • Scale a stream with more than 10000 shards down unless you set this value to less than 10000 shards.
                                                                      ", "smithy.api#required": {} } }, @@ -3205,6 +3322,60 @@ } } } + }, + "com.amazonaws.kinesis#UpdateStreamMode": { + "type": "operation", + "input": { + "target": "com.amazonaws.kinesis#UpdateStreamModeInput" + }, + "errors": [ + { + "target": "com.amazonaws.kinesis#InvalidArgumentException" + }, + { + "target": "com.amazonaws.kinesis#LimitExceededException" + }, + { + "target": "com.amazonaws.kinesis#ResourceInUseException" + }, + { + "target": "com.amazonaws.kinesis#ResourceNotFoundException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates the capacity mode of the data stream. Currently, in Kinesis Data Streams, you\n can choose between an on-demand capacity mode and a\n provisioned capacity mode for your data stream.\n

                                                                      " + } + }, + "com.amazonaws.kinesis#UpdateStreamModeInput": { + "type": "structure", + "members": { + "StreamARN": { + "target": "com.amazonaws.kinesis#StreamARN", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the ARN of the data stream whose capacity mode you want to update.

                                                                      ", + "smithy.api#required": {} + } + }, + "StreamModeDetails": { + "target": "com.amazonaws.kinesis#StreamModeDetails", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the capacity mode to which you want to set your data stream. Currently, in\n Kinesis Data Streams, you can choose between an on-demand capacity mode and a provisioned capacity mode for your data streams.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.kinesis#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.kinesis#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      \n \n

                                                                      ", + "smithy.api#error": "client" + } } } } diff --git a/codegen/sdk-codegen/aws-models/lakeformation.json b/codegen/sdk-codegen/aws-models/lakeformation.json index c4e978689049..ac79531802cf 100644 --- a/codegen/sdk-codegen/aws-models/lakeformation.json +++ b/codegen/sdk-codegen/aws-models/lakeformation.json @@ -31,6 +31,21 @@ "shapes": { "com.amazonaws.lakeformation#AWSLakeFormation": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "LakeFormation", + "arnNamespace": "lakeformation", + "cloudFormationName": "LakeFormation", + "cloudTrailEventSource": "lakeformation.amazonaws.com", + "endpointPrefix": "lakeformation" + }, + "aws.auth#sigv4": { + "name": "lakeformation" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "Lake Formation\n

                                                                      Defines the public endpoint for the Lake Formation service.

                                                                      ", + "smithy.api#title": "AWS Lake Formation" + }, "version": "2017-03-31", "operations": [ { @@ -42,18 +57,39 @@ { "target": "com.amazonaws.lakeformation#BatchRevokePermissions" }, + { + "target": "com.amazonaws.lakeformation#CancelTransaction" + }, + { + "target": "com.amazonaws.lakeformation#CommitTransaction" + }, + { + "target": "com.amazonaws.lakeformation#CreateDataCellsFilter" + }, { "target": "com.amazonaws.lakeformation#CreateLFTag" }, + { + "target": "com.amazonaws.lakeformation#DeleteDataCellsFilter" + }, { "target": "com.amazonaws.lakeformation#DeleteLFTag" }, + { + "target": "com.amazonaws.lakeformation#DeleteObjectsOnCancel" + }, { "target": "com.amazonaws.lakeformation#DeregisterResource" }, { "target": "com.amazonaws.lakeformation#DescribeResource" }, + { + "target": "com.amazonaws.lakeformation#DescribeTransaction" + }, + { + "target": "com.amazonaws.lakeformation#ExtendTransaction" + }, { "target": "com.amazonaws.lakeformation#GetDataLakeSettings" }, @@ -63,12 +99,30 @@ { "target": "com.amazonaws.lakeformation#GetLFTag" }, + { + "target": "com.amazonaws.lakeformation#GetQueryState" + }, + { + "target": "com.amazonaws.lakeformation#GetQueryStatistics" + }, { "target": "com.amazonaws.lakeformation#GetResourceLFTags" }, + { + "target": "com.amazonaws.lakeformation#GetTableObjects" + }, + { + "target": "com.amazonaws.lakeformation#GetWorkUnitResults" + }, + { + "target": "com.amazonaws.lakeformation#GetWorkUnits" + }, { "target": "com.amazonaws.lakeformation#GrantPermissions" }, + { + "target": "com.amazonaws.lakeformation#ListDataCellsFilter" + }, { "target": "com.amazonaws.lakeformation#ListLFTags" }, @@ -78,6 +132,12 @@ { "target": "com.amazonaws.lakeformation#ListResources" }, + { + "target": "com.amazonaws.lakeformation#ListTableStorageOptimizers" + }, + { + "target": "com.amazonaws.lakeformation#ListTransactions" + }, { "target": "com.amazonaws.lakeformation#PutDataLakeSettings" }, @@ -96,28 +156,25 @@ { "target": "com.amazonaws.lakeformation#SearchTablesByLFTags" }, + { + "target": "com.amazonaws.lakeformation#StartQueryPlanning" + }, + { + "target": "com.amazonaws.lakeformation#StartTransaction" + }, { "target": "com.amazonaws.lakeformation#UpdateLFTag" }, { "target": "com.amazonaws.lakeformation#UpdateResource" - } - ], - "traits": { - "aws.api#service": { - "sdkId": "LakeFormation", - "arnNamespace": "lakeformation", - "cloudFormationName": "LakeFormation", - "cloudTrailEventSource": "lakeformation.amazonaws.com", - "endpointPrefix": "lakeformation" }, - "aws.auth#sigv4": { - "name": "lakeformation" + { + "target": "com.amazonaws.lakeformation#UpdateTableObjects" }, - "aws.protocols#awsJson1_1": {}, - "smithy.api#documentation": "AWS Lake Formation\n

                                                                      Defines the public endpoint for the AWS Lake Formation service.

                                                                      ", - "smithy.api#title": "AWS Lake Formation" - } + { + "target": "com.amazonaws.lakeformation#UpdateTableStorageOptimizer" + } + ] }, "com.amazonaws.lakeformation#AccessDeniedException": { "type": "structure", @@ -131,7 +188,8 @@ }, "traits": { "smithy.api#documentation": "

                                                                      Access to a resource was denied.

                                                                      ", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 403 } }, "com.amazonaws.lakeformation#AddLFTagsToResource": { @@ -163,7 +221,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Attaches one or more tags to an existing resource.

                                                                      " + "smithy.api#documentation": "

                                                                      Attaches one or more LF-tags to an existing resource.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/AddLFTagsToResource", + "code": 200 + } } }, "com.amazonaws.lakeformation#AddLFTagsToResourceRequest": { @@ -172,20 +235,20 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Resource": { "target": "com.amazonaws.lakeformation#Resource", "traits": { - "smithy.api#documentation": "

                                                                      The resource to which to attach a tag.

                                                                      ", + "smithy.api#documentation": "

                                                                      The database, table, or column resource to which to attach an LF-tag.

                                                                      ", "smithy.api#required": {} } }, "LFTags": { "target": "com.amazonaws.lakeformation#LFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      The tags to attach to the resource.

                                                                      ", + "smithy.api#documentation": "

                                                                      The LF-tags to attach to the resource.

                                                                      ", "smithy.api#required": {} } } @@ -202,6 +265,48 @@ } } }, + "com.amazonaws.lakeformation#AddObjectInput": { + "type": "structure", + "members": { + "Uri": { + "target": "com.amazonaws.lakeformation#URI", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon S3 location of the object.

                                                                      ", + "smithy.api#required": {} + } + }, + "ETag": { + "target": "com.amazonaws.lakeformation#ETagString", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon S3 ETag of the object. Returned by GetTableObjects for validation and used to identify changes to the underlying data.

                                                                      ", + "smithy.api#required": {} + } + }, + "Size": { + "target": "com.amazonaws.lakeformation#ObjectSize", + "traits": { + "smithy.api#documentation": "

                                                                      The size of the Amazon S3 object in bytes.

                                                                      ", + "smithy.api#required": {} + } + }, + "PartitionValues": { + "target": "com.amazonaws.lakeformation#PartitionValuesList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of partition values for the object. A value must be specified for each partition key associated with the table.

                                                                      \n\t

                                                                      The supported data types are integer, long, date(yyyy-MM-dd), timestamp(yyyy-MM-dd HH:mm:ssXXX or yyyy-MM-dd HH:mm:ss), string and decimal.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A new object to add to the governed table.

                                                                      " + } + }, + "com.amazonaws.lakeformation#AllRowsWildcard": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that you pass to indicate you want all rows in a filter.

                                                                      " + } + }, "com.amazonaws.lakeformation#AlreadyExistsException": { "type": "structure", "members": { @@ -234,7 +339,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Batch operation to grant permissions to the principal.

                                                                      " + "smithy.api#documentation": "

                                                                      Batch operation to grant permissions to the principal.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/BatchGrantPermissions", + "code": 200 + } } }, "com.amazonaws.lakeformation#BatchGrantPermissionsRequest": { @@ -243,7 +353,7 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Entries": { @@ -354,7 +464,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Batch operation to revoke permissions from the principal.

                                                                      " + "smithy.api#documentation": "

                                                                      Batch operation to revoke permissions from the principal.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/BatchRevokePermissions", + "code": 200 + } } }, "com.amazonaws.lakeformation#BatchRevokePermissionsRequest": { @@ -363,7 +478,7 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Entries": { @@ -392,6 +507,62 @@ "smithy.api#box": {} } }, + "com.amazonaws.lakeformation#CancelTransaction": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#CancelTransactionRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#CancelTransactionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCommitInProgressException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCommittedException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Attempts to cancel the specified transaction. Returns an exception if the transaction was previously committed.
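For readers mapping this model change onto the generated TypeScript client, a minimal usage sketch follows (it is not part of the patch). It assumes the usual @aws-sdk/client-lakeformation command naming; the region and transaction ID are placeholders.

import { LakeFormationClient, CancelTransactionCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function cancelTransaction(transactionId: string): Promise<void> {
  try {
    // CancelTransactionRequest carries only the required TransactionId.
    await client.send(new CancelTransactionCommand({ TransactionId: transactionId }));
  } catch (err) {
    // Per the errors modeled above, a previously committed transaction surfaces
    // as TransactionCommittedException.
    if ((err as Error).name === "TransactionCommittedException") {
      console.log("Transaction was already committed; nothing to cancel.");
      return;
    }
    throw err;
  }
}

cancelTransaction("example-transaction-id").catch(console.error);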

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/CancelTransaction", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#CancelTransactionRequest": { + "type": "structure", + "members": { + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The transaction to cancel.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#CancelTransactionResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.lakeformation#CatalogIdString": { "type": "string", "traits": { @@ -399,7 +570,7 @@ "min": 1, "max": 255 }, - "smithy.api#pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" } }, "com.amazonaws.lakeformation#CatalogResource": { @@ -421,12 +592,12 @@ "LFTags": { "target": "com.amazonaws.lakeformation#LFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      The tags attached to a column resource.

                                                                      " + "smithy.api#documentation": "

                                                                      The LF-tags attached to a column resource.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure containing the name of a column resource and the tags attached to it.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure containing the name of a column resource and the LF-tags attached to it.

                                                                      " } }, "com.amazonaws.lakeformation#ColumnLFTagsList": { @@ -455,6 +626,66 @@ "smithy.api#documentation": "

                                                                      A wildcard object, consisting of an optional list of excluded column names or indexes.

                                                                      " } }, + "com.amazonaws.lakeformation#CommitTransaction": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#CommitTransactionRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#CommitTransactionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCanceledException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Attempts to commit the specified transaction. Returns an exception if the transaction was previously aborted. This API action is idempotent if called multiple times for the same transaction.
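A matching sketch for committing, again assuming the generated CommitTransactionCommand; the TransactionStatus field comes from the CommitTransactionResponse shape defined just below, and all values are placeholders.

import { LakeFormationClient, CommitTransactionCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function commitTransaction(transactionId: string): Promise<void> {
  // The response exposes the resulting TransactionStatus, as modeled below.
  const { TransactionStatus } = await client.send(
    new CommitTransactionCommand({ TransactionId: transactionId })
  );
  console.log(`Transaction ${transactionId} committed with status ${TransactionStatus}`);
}

commitTransaction("example-transaction-id").catch(console.error);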

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/CommitTransaction", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#CommitTransactionRequest": { + "type": "structure", + "members": { + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The transaction to commit.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#CommitTransactionResponse": { + "type": "structure", + "members": { + "TransactionStatus": { + "target": "com.amazonaws.lakeformation#TransactionStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the transaction.

                                                                      " + } + } + } + }, "com.amazonaws.lakeformation#ComparisonOperator": { "type": "string", "traits": { @@ -521,6 +752,62 @@ "smithy.api#error": "client" } }, + "com.amazonaws.lakeformation#CreateDataCellsFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#CreateDataCellsFilterRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#CreateDataCellsFilterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#AlreadyExistsException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#ResourceNumberLimitExceededException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a data cells filter so that you can grant access to certain columns on certain rows.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/CreateDataCellsFilter", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#CreateDataCellsFilterRequest": { + "type": "structure", + "members": { + "TableData": { + "target": "com.amazonaws.lakeformation#DataCellsFilter", + "traits": { + "smithy.api#documentation": "

                                                                      A DataCellsFilter structure containing information about the data cells filter.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#CreateDataCellsFilterResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.lakeformation#CreateLFTag": { "type": "operation", "input": { @@ -550,7 +837,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Creates a tag with the specified name and values.

                                                                      " + "smithy.api#documentation": "

                                                                      Creates an LF-tag with the specified name and values.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/CreateLFTag", + "code": 200 + } } }, "com.amazonaws.lakeformation#CreateLFTagRequest": { @@ -559,13 +851,13 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "TagKey": { "target": "com.amazonaws.lakeformation#LFTagKey", "traits": { - "smithy.api#documentation": "

                                                                      The key-name for the tag.

                                                                      ", + "smithy.api#documentation": "

                                                                      The key-name for the LF-tag.

                                                                      ", "smithy.api#required": {} } }, @@ -582,13 +874,105 @@ "type": "structure", "members": {} }, - "com.amazonaws.lakeformation#DataLakePrincipal": { + "com.amazonaws.lakeformation#DataCellsFilter": { "type": "structure", "members": { - "DataLakePrincipalIdentifier": { - "target": "com.amazonaws.lakeformation#DataLakePrincipalString", + "TableCatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the catalog to which the table belongs.

                                                                      ", + "smithy.api#required": {} + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      A database in the Glue Data Catalog.

                                                                      ", + "smithy.api#required": {} + } + }, + "TableName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      A table in the database.

                                                                      ", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

The name given by the user to the data cells filter.

                                                                      ", + "smithy.api#required": {} + } + }, + "RowFilter": { + "target": "com.amazonaws.lakeformation#RowFilter", + "traits": { + "smithy.api#documentation": "

                                                                      A PartiQL predicate.

                                                                      " + } + }, + "ColumnNames": { + "target": "com.amazonaws.lakeformation#ColumnNames", + "traits": { + "smithy.api#documentation": "

                                                                      A list of column names.

                                                                      " + } + }, + "ColumnWildcard": { + "target": "com.amazonaws.lakeformation#ColumnWildcard", "traits": { - "smithy.api#documentation": "

                                                                      An identifier for the AWS Lake Formation principal.

                                                                      " + "smithy.api#documentation": "

                                                                      A wildcard with exclusions.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that describes certain columns on certain rows.
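To show how the members above fit together, here is a hedged sketch of creating a filter with the generated client. All names and the PartiQL predicate are placeholders, and the assumption that RowFilter exposes a FilterExpression string is not confirmed by this excerpt.

import { LakeFormationClient, CreateDataCellsFilterCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function createFilter(): Promise<void> {
  await client.send(
    new CreateDataCellsFilterCommand({
      TableData: {
        TableCatalogId: "111122223333", // placeholder catalog (account) ID
        DatabaseName: "sales_db", // placeholder database
        TableName: "orders", // placeholder table
        Name: "us-only-no-pii", // placeholder filter name
        RowFilter: { FilterExpression: "country = 'US'" }, // assumed RowFilter member
        ColumnNames: ["order_id", "country", "amount"], // columns the grantee may see
      },
    })
  );
}

createFilter().catch(console.error);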

                                                                      " + } + }, + "com.amazonaws.lakeformation#DataCellsFilterList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#DataCellsFilter" + } + }, + "com.amazonaws.lakeformation#DataCellsFilterResource": { + "type": "structure", + "members": { + "TableCatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the catalog to which the table belongs.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      A database in the Glue Data Catalog.

                                                                      " + } + }, + "TableName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the table.

                                                                      " + } + }, + "Name": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the data cells filter.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure for a data cells filter resource.

                                                                      " + } + }, + "com.amazonaws.lakeformation#DataLakePrincipal": { + "type": "structure", + "members": { + "DataLakePrincipalIdentifier": { + "target": "com.amazonaws.lakeformation#DataLakePrincipalString", + "traits": { + "smithy.api#documentation": "

                                                                      An identifier for the Lake Formation principal.

                                                                      " } } }, @@ -662,30 +1046,30 @@ "DataLakeAdmins": { "target": "com.amazonaws.lakeformation#DataLakePrincipalList", "traits": { - "smithy.api#documentation": "

                                                                      A list of AWS Lake Formation principals. Supported principals are IAM users or IAM roles.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of Lake Formation principals. Supported principals are IAM users or IAM roles.

                                                                      " } }, "CreateDatabaseDefaultPermissions": { "target": "com.amazonaws.lakeformation#PrincipalPermissionsList", "traits": { - "smithy.api#documentation": "

                                                                      A structure representing a list of up to three principal permissions entries for default create database permissions.

                                                                      " + "smithy.api#documentation": "

Specifies whether access control on a newly created database is managed by Lake Formation permissions or exclusively by IAM permissions. You can override this default setting when you create a database.

A null value indicates access control by Lake Formation permissions. A value that assigns ALL to IAM_ALLOWED_PRINCIPALS indicates access control by IAM permissions. This is referred to as the setting \"Use only IAM access control,\" and is for backward compatibility with the Glue permission model implemented by IAM permissions.

The only permitted values are an empty array or an array that contains a single JSON object that grants ALL to IAM_ALLOWED_PRINCIPALS.

For more information, see Changing the Default Security Settings for Your Data Lake.

                                                                      " } }, "CreateTableDefaultPermissions": { "target": "com.amazonaws.lakeformation#PrincipalPermissionsList", "traits": { - "smithy.api#documentation": "

                                                                      A structure representing a list of up to three principal permissions entries for default create table permissions.

                                                                      " + "smithy.api#documentation": "

Specifies whether access control on a newly created table is managed by Lake Formation permissions or exclusively by IAM permissions.

A null value indicates access control by Lake Formation permissions. A value that assigns ALL to IAM_ALLOWED_PRINCIPALS indicates access control by IAM permissions. This is referred to as the setting \"Use only IAM access control,\" and is for backward compatibility with the Glue permission model implemented by IAM permissions.

The only permitted values are an empty array or an array that contains a single JSON object that grants ALL to IAM_ALLOWED_PRINCIPALS.

For more information, see Changing the Default Security Settings for Your Data Lake.

                                                                      " } }, "TrustedResourceOwners": { "target": "com.amazonaws.lakeformation#TrustedResourceOwners", "traits": { - "smithy.api#documentation": "

A list of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). The user ARNs can be logged in the resource owner's AWS CloudTrail log.

You may want to specify this property when you are in a high-trust boundary, such as the same team or company.
" + "smithy.api#documentation": "
A list of the resource-owning account IDs that the caller's account can use to share their user access details (user ARNs). The user ARNs can be logged in the resource owner's CloudTrail log.

You may want to specify this property when you are in a high-trust boundary, such as the same team or company.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure representing a list of AWS Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure representing a list of Lake Formation principals designated as data lake administrators and lists of principal permission entries for default create database and default create table permissions.

                                                                      " } }, "com.amazonaws.lakeformation#DataLocationResource": { @@ -694,7 +1078,7 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog where the location is registered with AWS Lake Formation. By default, it is the account ID of the caller.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog where the location is registered with Lake Formation. By default, it is the account ID of the caller.

                                                                      " } }, "ResourceArn": { @@ -736,6 +1120,79 @@ "smithy.api#documentation": "

                                                                      A structure for the database object.

                                                                      " } }, + "com.amazonaws.lakeformation#DateTime": { + "type": "timestamp", + "traits": { + "smithy.api#timestampFormat": "date-time" + } + }, + "com.amazonaws.lakeformation#DeleteDataCellsFilter": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#DeleteDataCellsFilterRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#DeleteDataCellsFilterResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a data cell filter.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/DeleteDataCellsFilter", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#DeleteDataCellsFilterRequest": { + "type": "structure", + "members": { + "TableCatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the catalog to which the table belongs.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      A database in the Glue Data Catalog.

                                                                      " + } + }, + "TableName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      A table in the database.

                                                                      " + } + }, + "Name": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

The name given by the user to the data cells filter.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#DeleteDataCellsFilterResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.lakeformation#DeleteLFTag": { "type": "operation", "input": { @@ -762,7 +1219,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Deletes the specified tag key name. If the attribute key does not exist or the tag does not exist, then the operation will not do anything. If the attribute key exists, then the operation checks if any resources are tagged with this attribute key, if yes, the API throws a 400 Exception with the message \"Delete not allowed\" as the tag key is still attached with resources. You can consider untagging resources with this tag key.

                                                                      " + "smithy.api#documentation": "

Deletes the specified LF-tag key name. If the attribute key does not exist or the LF-tag does not exist, the operation does nothing. If the attribute key exists, the operation checks whether any resources are tagged with this attribute key; if so, the API throws a 400 exception with the message \"Delete not allowed\" because the LF-tag key is still attached to resources. Consider untagging resources with this LF-tag key first.

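A small sketch of the corresponding client call, assuming the generated DeleteLFTagCommand; the tag key is a placeholder, and CatalogId is omitted so the account-ID default described above applies.

import { LakeFormationClient, DeleteLFTagCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function deleteTag(tagKey: string): Promise<void> {
  // Fails with a 400 error if the LF-tag is still attached to resources (see above).
  await client.send(new DeleteLFTagCommand({ TagKey: tagKey }));
}

deleteTag("environment").catch(console.error); // placeholder LF-tag key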
                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/DeleteLFTag", + "code": 200 + } } }, "com.amazonaws.lakeformation#DeleteLFTagRequest": { @@ -771,13 +1233,13 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "TagKey": { "target": "com.amazonaws.lakeformation#LFTagKey", "traits": { - "smithy.api#documentation": "

                                                                      The key-name for the tag to delete.

                                                                      ", + "smithy.api#documentation": "

                                                                      The key-name for the LF-tag to delete.

                                                                      ", "smithy.api#required": {} } } @@ -787,6 +1249,119 @@ "type": "structure", "members": {} }, + "com.amazonaws.lakeformation#DeleteObjectInput": { + "type": "structure", + "members": { + "Uri": { + "target": "com.amazonaws.lakeformation#URI", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon S3 location of the object to delete.

                                                                      ", + "smithy.api#required": {} + } + }, + "ETag": { + "target": "com.amazonaws.lakeformation#ETagString", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon S3 ETag of the object. Returned by GetTableObjects for validation and used to identify changes to the underlying data.

                                                                      " + } + }, + "PartitionValues": { + "target": "com.amazonaws.lakeformation#PartitionValuesList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of partition values for the object. A value must be specified for each partition key associated with the governed table.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object to delete from the governed table.

                                                                      " + } + }, + "com.amazonaws.lakeformation#DeleteObjectsOnCancel": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#DeleteObjectsOnCancelRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#DeleteObjectsOnCancelResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#ResourceNotReadyException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCanceledException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCommittedException" + } + ], + "traits": { + "smithy.api#documentation": "

For a specific governed table, provides a list of Amazon S3 objects that will be written during the current transaction and that can be automatically deleted if the transaction is canceled. Without this call, no Amazon S3 objects are automatically deleted when a transaction cancels.

The Glue ETL library function write_dynamic_frame.from_catalog() includes an option to automatically call DeleteObjectsOnCancel before writes. For more information, see Rolling Back Amazon S3 Writes.

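The sketch below illustrates registering written objects for cleanup with the generated client. It is illustrative only: the database, table, and S3 URI are placeholders, and it assumes each VirtualObject entry names an S3 Uri (with an optional ETag), which this excerpt does not spell out.

import { LakeFormationClient, DeleteObjectsOnCancelCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function registerWritesForCleanup(transactionId: string): Promise<void> {
  await client.send(
    new DeleteObjectsOnCancelCommand({
      DatabaseName: "sales_db", // placeholder governed database
      TableName: "orders_governed", // placeholder governed table
      TransactionId: transactionId,
      Objects: [
        // Each entry is an S3 object to delete automatically if the transaction cancels.
        { Uri: "s3://example-bucket/orders/part-0000.parquet" },
      ],
    })
  );
}

registerWritesForCleanup("example-transaction-id").catch(console.error);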
                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/DeleteObjectsOnCancel", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#DeleteObjectsOnCancelRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

The Glue Data Catalog that contains the governed table. Defaults to the current account ID.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      The database that contains the governed table.

                                                                      ", + "smithy.api#required": {} + } + }, + "TableName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the governed table.

                                                                      ", + "smithy.api#required": {} + } + }, + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      ID of the transaction that the writes occur in.

                                                                      ", + "smithy.api#required": {} + } + }, + "Objects": { + "target": "com.amazonaws.lakeformation#VirtualObjectList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of VirtualObject structures, which indicates the Amazon S3 objects to be deleted if the transaction cancels.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#DeleteObjectsOnCancelResponse": { + "type": "structure", + "members": {} + }, "com.amazonaws.lakeformation#DeregisterResource": { "type": "operation", "input": { @@ -810,7 +1385,12 @@ } ], "traits": { - "smithy.api#documentation": "

Deregisters the resource as managed by the Data Catalog.

When you deregister a path, Lake Formation removes the path from the inline policy attached to your service-linked role.
" + "smithy.api#documentation": "
Deregisters the resource as managed by the Data Catalog.

When you deregister a path, Lake Formation removes the path from the inline policy attached to your service-linked role.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/DeregisterResource", + "code": 200 + } } }, "com.amazonaws.lakeformation#DeregisterResourceRequest": { @@ -852,7 +1432,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Retrieves the current data access role for the given resource registered in AWS Lake Formation.

                                                                      " + "smithy.api#documentation": "

                                                                      Retrieves the current data access role for the given resource registered in Lake Formation.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/DescribeResource", + "code": 200 + } } }, "com.amazonaws.lakeformation#DescribeResourceRequest": { @@ -873,7 +1458,61 @@ "ResourceInfo": { "target": "com.amazonaws.lakeformation#ResourceInfo", "traits": { - "smithy.api#documentation": "

                                                                      A structure containing information about an AWS Lake Formation resource.

                                                                      " + "smithy.api#documentation": "

A structure containing information about a Lake Formation resource.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#DescribeTransaction": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#DescribeTransactionRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#DescribeTransactionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns the details of a single transaction.
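As a usage sketch (not part of the patch), the generated DescribeTransactionCommand returns the TransactionDescription structure referenced below; values are placeholders.

import { LakeFormationClient, DescribeTransactionCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function describeTransaction(transactionId: string): Promise<void> {
  const { TransactionDescription } = await client.send(
    new DescribeTransactionCommand({ TransactionId: transactionId })
  );
  console.log(JSON.stringify(TransactionDescription, null, 2));
}

describeTransaction("example-transaction-id").catch(console.error);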

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/DescribeTransaction", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#DescribeTransactionRequest": { + "type": "structure", + "members": { + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The transaction for which to return status.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#DescribeTransactionResponse": { + "type": "structure", + "members": { + "TransactionDescription": { + "target": "com.amazonaws.lakeformation#TransactionDescription", + "traits": { + "smithy.api#documentation": "

                                                                      Returns a TransactionDescription object containing information about the transaction.

                                                                      " } } } @@ -885,7 +1524,7 @@ "min": 0, "max": 2048 }, - "smithy.api#pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*" + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" } }, "com.amazonaws.lakeformation#DetailsMap": { @@ -894,12 +1533,22 @@ "ResourceShare": { "target": "com.amazonaws.lakeformation#ResourceShareList", "traits": { - "smithy.api#documentation": "

                                                                      A resource share ARN for a catalog resource shared through AWS Resource Access Manager (AWS RAM).

                                                                      " + "smithy.api#documentation": "

                                                                      A resource share ARN for a catalog resource shared through RAM.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

A structure containing the additional details to be returned in the AdditionalDetails attribute of PrincipalResourcePermissions.

If a catalog resource is shared through AWS Resource Access Manager (AWS RAM), then there will exist a corresponding RAM resource share ARN.
" + "smithy.api#documentation": "
A structure containing the additional details to be returned in the AdditionalDetails attribute of PrincipalResourcePermissions.

If a catalog resource is shared through Resource Access Manager (RAM), then there will exist a corresponding RAM resource share ARN.

                                                                      " + } + }, + "com.amazonaws.lakeformation#ETagString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[\\p{L}\\p{N}\\p{P}]*$" } }, "com.amazonaws.lakeformation#EntityNotFoundException": { @@ -937,6 +1586,51 @@ "smithy.api#documentation": "

                                                                      Contains details about an error.

                                                                      " } }, + "com.amazonaws.lakeformation#ErrorMessageString": { + "type": "string" + }, + "com.amazonaws.lakeformation#ExecutionStatistics": { + "type": "structure", + "members": { + "AverageExecutionTimeMillis": { + "target": "com.amazonaws.lakeformation#NumberOfMilliseconds", + "traits": { + "smithy.api#documentation": "

                                                                      The average time the request took to be executed.

                                                                      " + } + }, + "DataScannedBytes": { + "target": "com.amazonaws.lakeformation#NumberOfBytes", + "traits": { + "smithy.api#documentation": "

                                                                      The amount of data that was scanned in bytes.

                                                                      " + } + }, + "WorkUnitsExecutedCount": { + "target": "com.amazonaws.lakeformation#NumberOfItems", + "traits": { + "smithy.api#documentation": "

                                                                      The number of work units executed.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Statistics related to the processing of a query statement.

                                                                      " + } + }, + "com.amazonaws.lakeformation#ExpiredException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an error where the query request expired.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 410 + } + }, "com.amazonaws.lakeformation#Expression": { "type": "list", "member": { @@ -949,10 +1643,65 @@ } } }, - "com.amazonaws.lakeformation#FieldNameString": { - "type": "string", - "traits": { - "smithy.api#enum": [ + "com.amazonaws.lakeformation#ExtendTransaction": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#ExtendTransactionRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#ExtendTransactionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCanceledException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCommitInProgressException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCommittedException" + } + ], + "traits": { + "smithy.api#documentation": "

Indicates to the service that the specified transaction is still active and should not be treated as idle and aborted.

Write transactions that remain idle for a long period are automatically aborted unless explicitly extended.

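One common shape for this is a heartbeat around long-running writes. The sketch below assumes the generated ExtendTransactionCommand; the two-minute interval is an arbitrary choice, not a documented requirement.

import { LakeFormationClient, ExtendTransactionCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// Keep a long-running write transaction from being treated as idle.
function keepTransactionAlive(transactionId: string): ReturnType<typeof setInterval> {
  return setInterval(() => {
    client
      .send(new ExtendTransactionCommand({ TransactionId: transactionId }))
      .catch(console.error);
  }, 2 * 60 * 1000);
}

const heartbeat = keepTransactionAlive("example-transaction-id");
// Call clearInterval(heartbeat) once the transaction is committed or canceled.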
                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/ExtendTransaction", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#ExtendTransactionRequest": { + "type": "structure", + "members": { + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The transaction to extend.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#ExtendTransactionResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.lakeformation#FieldNameString": { + "type": "string", + "traits": { + "smithy.api#enum": [ { "value": "RESOURCE_ARN", "name": "RESOURCE_ARN" @@ -1026,7 +1775,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Retrieves the list of the data lake administrators of a Lake Formation-managed data lake.

                                                                      " + "smithy.api#documentation": "

                                                                      Retrieves the list of the data lake administrators of a Lake Formation-managed data lake.
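A brief sketch of reading the administrator list with the generated client, using only fields defined in this model (DataLakeSettings, DataLakeAdmins, DataLakePrincipalIdentifier); the region is a placeholder.

import { LakeFormationClient, GetDataLakeSettingsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listDataLakeAdmins(): Promise<void> {
  const { DataLakeSettings } = await client.send(new GetDataLakeSettingsCommand({}));
  for (const admin of DataLakeSettings?.DataLakeAdmins ?? []) {
    console.log(admin.DataLakePrincipalIdentifier);
  }
}

listDataLakeAdmins().catch(console.error);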

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/GetDataLakeSettings", + "code": 200 + } } }, "com.amazonaws.lakeformation#GetDataLakeSettingsRequest": { @@ -1035,7 +1789,7 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } } } @@ -1046,7 +1800,7 @@ "DataLakeSettings": { "target": "com.amazonaws.lakeformation#DataLakeSettings", "traits": { - "smithy.api#documentation": "

                                                                      A structure representing a list of AWS Lake Formation principals designated as data lake administrators.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure representing a list of Lake Formation principals designated as data lake administrators.

                                                                      " } } } @@ -1075,6 +1829,11 @@ ], "traits": { "smithy.api#documentation": "

Returns the Lake Formation permissions for a specified table or database resource located at a path in Amazon S3. GetEffectivePermissionsForPath will not return databases and tables if the catalog is encrypted.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/GetEffectivePermissionsForPath", + "code": 200 + }, "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -1088,7 +1847,7 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "ResourceArn": { @@ -1155,7 +1914,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns a tag definition.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns an LF-tag definition.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/GetLFTag", + "code": 200 + } } }, "com.amazonaws.lakeformation#GetLFTagRequest": { @@ -1164,13 +1928,13 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "TagKey": { "target": "com.amazonaws.lakeformation#LFTagKey", "traits": { - "smithy.api#documentation": "

                                                                      The key-name for the tag.

                                                                      ", + "smithy.api#documentation": "

                                                                      The key-name for the LF-tag.

                                                                      ", "smithy.api#required": {} } } @@ -1182,13 +1946,13 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "TagKey": { "target": "com.amazonaws.lakeformation#LFTagKey", "traits": { - "smithy.api#documentation": "

                                                                      The key-name for the tag.

                                                                      " + "smithy.api#documentation": "

                                                                      The key-name for the LF-tag.

                                                                      " } }, "TagValues": { @@ -1199,6 +1963,163 @@ } } }, + "com.amazonaws.lakeformation#GetQueryState": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#GetQueryStateRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#GetQueryStateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "

Returns the state of a query previously submitted. Clients are expected to poll GetQueryState to monitor the current state of query planning before retrieving the work units. A query state is only visible to the principal that made the initial call to StartQueryPlanning.

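Since callers are expected to poll, a hedged polling sketch follows, using only the states enumerated in the response shape below; the two-second delay and error handling are illustrative choices.

import { LakeFormationClient, GetQueryStateCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

// Poll until planning leaves the PENDING state, then hand back the final state.
async function waitForQueryPlanning(queryId: string): Promise<string> {
  for (;;) {
    const { State, Error: errorMessage } = await client.send(
      new GetQueryStateCommand({ QueryId: queryId })
    );
    if (State === "ERROR") {
      throw new Error(errorMessage);
    }
    if (State === "WORKUNITS_AVAILABLE" || State === "FINISHED") {
      return State;
    }
    await new Promise((resolve) => setTimeout(resolve, 2000)); // arbitrary 2s backoff
  }
}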
                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "query-" + }, + "smithy.api#http": { + "method": "POST", + "uri": "/GetQueryState", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#GetQueryStateRequest": { + "type": "structure", + "members": { + "QueryId": { + "target": "com.amazonaws.lakeformation#GetQueryStateRequestQueryIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the plan query operation.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#GetQueryStateRequestQueryIdString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + } + } + }, + "com.amazonaws.lakeformation#GetQueryStateResponse": { + "type": "structure", + "members": { + "Error": { + "target": "com.amazonaws.lakeformation#ErrorMessageString", + "traits": { + "smithy.api#documentation": "

                                                                      An error message when the operation fails.

                                                                      " + } + }, + "State": { + "target": "com.amazonaws.lakeformation#QueryStateString", + "traits": { + "smithy.api#documentation": "

The state of a query previously submitted. The possible states are:

• PENDING: the query is pending.

• WORKUNITS_AVAILABLE: some work units are ready for retrieval and execution.

• FINISHED: the query planning finished successfully, and all work units are ready for retrieval and execution.

• ERROR: an error occurred with the query, such as an invalid query ID or a backend error.
                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure for the output.

                                                                      " + } + }, + "com.amazonaws.lakeformation#GetQueryStatistics": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#GetQueryStatisticsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#GetQueryStatisticsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#ExpiredException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#StatisticsNotReadyYetException" + }, + { + "target": "com.amazonaws.lakeformation#ThrottledException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves statistics on the planning and execution of a query.
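A short sketch of reading the returned statistics with the generated client; field names follow the ExecutionStatistics structure defined earlier in this file, and the query ID is a placeholder (the model requires a 36-character ID).

import { LakeFormationClient, GetQueryStatisticsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function printQueryStatistics(queryId: string): Promise<void> {
  const { ExecutionStatistics, PlanningStatistics, QuerySubmissionTime } = await client.send(
    new GetQueryStatisticsCommand({ QueryId: queryId })
  );
  console.log(`Submitted at ${QuerySubmissionTime}`);
  console.log(`Bytes scanned: ${ExecutionStatistics?.DataScannedBytes}`);
  console.log(`Work units executed: ${ExecutionStatistics?.WorkUnitsExecutedCount}`);
  console.log(`Planning statistics: ${JSON.stringify(PlanningStatistics)}`);
}

printQueryStatistics("00000000-0000-0000-0000-000000000000").catch(console.error);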

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "query-" + }, + "smithy.api#http": { + "method": "POST", + "uri": "/GetQueryStatistics", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#GetQueryStatisticsRequest": { + "type": "structure", + "members": { + "QueryId": { + "target": "com.amazonaws.lakeformation#GetQueryStatisticsRequestQueryIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the plan query operation.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#GetQueryStatisticsRequestQueryIdString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + } + } + }, + "com.amazonaws.lakeformation#GetQueryStatisticsResponse": { + "type": "structure", + "members": { + "ExecutionStatistics": { + "target": "com.amazonaws.lakeformation#ExecutionStatistics", + "traits": { + "smithy.api#documentation": "

                                                                      An ExecutionStatistics structure containing execution statistics.

                                                                      " + } + }, + "PlanningStatistics": { + "target": "com.amazonaws.lakeformation#PlanningStatistics", + "traits": { + "smithy.api#documentation": "

                                                                      A PlanningStatistics structure containing query planning statistics.

                                                                      " + } + }, + "QuerySubmissionTime": { + "target": "com.amazonaws.lakeformation#DateTime", + "traits": { + "smithy.api#documentation": "

                                                                      The time that the query was submitted.

                                                                      " + } + } + } + }, "com.amazonaws.lakeformation#GetResourceLFTags": { "type": "operation", "input": { @@ -1228,7 +2149,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns the tags applied to a resource.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns the LF-tags applied to a resource.
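A minimal TypeScript sketch of this call, assuming the generated `GetResourceLFTagsCommand` in `@aws-sdk/client-lakeformation`; the database and table names are placeholders.

```ts
import { LakeFormationClient, GetResourceLFTagsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function showTableLFTags(): Promise<void> {
  const response = await client.send(
    new GetResourceLFTagsCommand({
      Resource: {
        Table: { DatabaseName: "sales_db", Name: "orders" }, // placeholder names
      },
      ShowAssignedLFTags: true,
    })
  );
  console.log("database LF-tags:", response.LFTagOnDatabase);
  console.log("table LF-tags:", response.LFTagsOnTable);
  console.log("column LF-tags:", response.LFTagsOnColumns);
}

showTableLFTags().catch(console.error);
```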

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/GetResourceLFTags", + "code": 200 + } } }, "com.amazonaws.lakeformation#GetResourceLFTagsRequest": { @@ -1237,20 +2163,20 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Resource": { "target": "com.amazonaws.lakeformation#Resource", "traits": { - "smithy.api#documentation": "

                                                                      The resource for which you want to return tags.

                                                                      ", + "smithy.api#documentation": "

                                                                      The database, table, or column resource for which you want to return LF-tags.

                                                                      ", "smithy.api#required": {} } }, "ShowAssignedLFTags": { "target": "com.amazonaws.lakeformation#BooleanNullable", "traits": { - "smithy.api#documentation": "

                                                                      Indicates whether to show the assigned tags.

                                                                      " + "smithy.api#documentation": "

                                                                      Indicates whether to show the assigned LF-tags.

                                                                      " } } } @@ -1261,131 +2187,452 @@ "LFTagOnDatabase": { "target": "com.amazonaws.lakeformation#LFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tags applied to a database resource.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tags applied to a database resource.

                                                                      " } }, "LFTagsOnTable": { "target": "com.amazonaws.lakeformation#LFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tags applied to a table resource.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tags applied to a table resource.

                                                                      " } }, "LFTagsOnColumns": { "target": "com.amazonaws.lakeformation#ColumnLFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tags applied to a column resource.

                                                                      " - } - } - } - }, - "com.amazonaws.lakeformation#GlueEncryptionException": { - "type": "structure", - "members": { - "Message": { - "target": "com.amazonaws.lakeformation#MessageString", - "traits": { - "smithy.api#documentation": "

                                                                      A message describing the problem.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tags applied to a column resource.

                                                                      " } } - }, - "traits": { - "smithy.api#documentation": "

                                                                      An encryption operation failed.

                                                                      ", - "smithy.api#error": "client" } }, - "com.amazonaws.lakeformation#GrantPermissions": { + "com.amazonaws.lakeformation#GetTableObjects": { "type": "operation", "input": { - "target": "com.amazonaws.lakeformation#GrantPermissionsRequest" + "target": "com.amazonaws.lakeformation#GetTableObjectsRequest" }, "output": { - "target": "com.amazonaws.lakeformation#GrantPermissionsResponse" + "target": "com.amazonaws.lakeformation#GetTableObjectsResponse" }, "errors": [ { - "target": "com.amazonaws.lakeformation#ConcurrentModificationException" + "target": "com.amazonaws.lakeformation#EntityNotFoundException" }, { - "target": "com.amazonaws.lakeformation#EntityNotFoundException" + "target": "com.amazonaws.lakeformation#InternalServiceException" }, { "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#ResourceNotReadyException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCanceledException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCommittedException" } ], "traits": { - "smithy.api#documentation": "

                                                                      Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.


                                                                      For information about permissions, see Security and Access Control to Metadata and Data.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns the set of Amazon S3 objects that make up the specified governed table. A transaction ID or timestamp can be specified for time-travel queries.
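A hedged sketch of a time-travel read with the generated client; the table coordinates and timestamp are placeholders, and `TransactionId` could be supplied instead of `QueryAsOfTime` (the two are mutually exclusive, as noted below).

```ts
import { LakeFormationClient, GetTableObjectsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listObjectsAsOf(asOf: Date): Promise<void> {
  const response = await client.send(
    new GetTableObjectsCommand({
      DatabaseName: "sales_db", // placeholder governed-table coordinates
      TableName: "orders",
      QueryAsOfTime: asOf,      // time-travel read; omit to read the latest committed state
      // PartitionPredicate: "region = 'us-east-1' AND sale_date >= '2021-11-01'",
    })
  );
  console.log("partitions returned:", response.Objects?.length ?? 0);
  if (response.NextToken) {
    console.log("more objects available; pass NextToken to the next call");
  }
}

// Read the table as it looked 24 hours ago.
listObjectsAsOf(new Date(Date.now() - 24 * 60 * 60 * 1000)).catch(console.error);
```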

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/GetTableObjects", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } } }, - "com.amazonaws.lakeformation#GrantPermissionsRequest": { + "com.amazonaws.lakeformation#GetTableObjectsRequest": { "type": "structure", "members": { "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The catalog containing the governed table. Defaults to the caller’s account.

                                                                      " } }, - "Principal": { - "target": "com.amazonaws.lakeformation#DataLakePrincipal", + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", "traits": { - "smithy.api#documentation": "

                                                                      The principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles, and they are defined by their principal type and their ARN.


                                                                      Note that if you define a resource with a particular ARN, then later delete, and recreate a resource with that same ARN, the resource maintains the permissions already granted.

                                                                      ", + "smithy.api#documentation": "

                                                                      The database containing the governed table.

                                                                      ", "smithy.api#required": {} } }, - "Resource": { - "target": "com.amazonaws.lakeformation#Resource", + "TableName": { + "target": "com.amazonaws.lakeformation#NameString", "traits": { - "smithy.api#documentation": "

                                                                      The resource to which permissions are to be granted. Resources in AWS Lake Formation are the Data Catalog, databases, and tables.

                                                                      ", + "smithy.api#documentation": "

                                                                      The governed table for which to retrieve objects.

                                                                      ", "smithy.api#required": {} } }, - "Permissions": { - "target": "com.amazonaws.lakeformation#PermissionList", + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", "traits": { - "smithy.api#documentation": "

                                                                      The permissions granted to the principal on the resource. AWS Lake Formation defines privileges to grant and revoke access to metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3. AWS Lake Formation requires that each principal be authorized to perform a specific task on AWS Lake Formation resources.

                                                                      ", - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      The transaction ID at which to read the governed table contents. If this transaction has aborted, an error is returned. If not set, defaults to the most recent committed transaction. Cannot be specified along with QueryAsOfTime.

                                                                      " } }, - "PermissionsWithGrantOption": { - "target": "com.amazonaws.lakeformation#PermissionList", + "QueryAsOfTime": { + "target": "com.amazonaws.lakeformation#Timestamp", "traits": { - "smithy.api#documentation": "

                                                                      Indicates a list of the granted permissions that the principal may pass to other users. These permissions may only be a subset of the permissions granted in the Privileges.

                                                                      " + "smithy.api#documentation": "

                                                                      The time as of when to read the governed table contents. If not set, the most recent transaction commit time is used. Cannot be specified along with TransactionId.

                                                                      " + } + }, + "PartitionPredicate": { + "target": "com.amazonaws.lakeformation#PredicateString", + "traits": { + "smithy.api#documentation": "

                                                                      A predicate to filter the objects returned based on the partition keys defined in the governed table.

                                                                      • The comparison operators supported are: =, >, <, >=, <=
                                                                      • The logical operators supported are: AND
                                                                      • The data types supported are integer, long, date(yyyy-MM-dd), timestamp(yyyy-MM-dd HH:mm:ssXXX or yyyy-MM-dd HH:mm:ss), string and decimal.

                                                                      " + } + }, + "MaxResults": { + "target": "com.amazonaws.lakeformation#PageSize", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies how many values to return in a page.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#TokenString", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token if this is not the first call to retrieve these objects.

                                                                      " } } } }, - "com.amazonaws.lakeformation#GrantPermissionsResponse": { + "com.amazonaws.lakeformation#GetTableObjectsResponse": { "type": "structure", - "members": {} - }, - "com.amazonaws.lakeformation#IAMRoleArn": { - "type": "string", - "traits": { - "smithy.api#pattern": "arn:aws:iam::[0-9]*:role/.*" + "members": { + "Objects": { + "target": "com.amazonaws.lakeformation#PartitionedTableObjectsList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of objects organized by partition keys.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#TokenString", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token indicating whether additional data is available.

                                                                      " + } + } } }, - "com.amazonaws.lakeformation#Identifier": { - "type": "string", + "com.amazonaws.lakeformation#GetWorkUnitResults": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#GetWorkUnitResultsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#GetWorkUnitResultsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#ExpiredException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#ThrottledException" + } + ], "traits": { - "smithy.api#length": { - "min": 1, - "max": 255 + "smithy.api#documentation": "

                                                                      Returns the work units resulting from the query. Work units can be executed in any order and in parallel.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "data-" + }, + "smithy.api#http": { + "method": "POST", + "uri": "/GetWorkUnitResults", + "code": 200 } } }, - "com.amazonaws.lakeformation#InternalServiceException": { + "com.amazonaws.lakeformation#GetWorkUnitResultsRequest": { "type": "structure", "members": { - "Message": { - "target": "com.amazonaws.lakeformation#MessageString", + "QueryId": { + "target": "com.amazonaws.lakeformation#GetWorkUnitResultsRequestQueryIdString", "traits": { - "smithy.api#documentation": "

                                                                      A message describing the problem.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the plan query operation for which to get results.

                                                                      ", + "smithy.api#required": {} + } + }, + "WorkUnitId": { + "target": "com.amazonaws.lakeformation#GetWorkUnitResultsRequestWorkUnitIdLong", + "traits": { + "smithy.api#documentation": "

                                                                      The work unit ID for which to get results. Value generated by enumerating WorkUnitIdMin to WorkUnitIdMax (inclusive) from the WorkUnitRange in the output of GetWorkUnits.
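As a worked example of that enumeration (a sketch, not part of the model): the loop below pages through `GetWorkUnits`, walks each returned range from `WorkUnitIdMin` to `WorkUnitIdMax` inclusive, and fetches each unit's Arrow result stream; the per-range `WorkUnitToken` member and the placeholder query ID are assumptions.

```ts
import {
  LakeFormationClient,
  GetWorkUnitsCommand,
  GetWorkUnitResultsCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function fetchAllWorkUnitResults(queryId: string): Promise<void> {
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new GetWorkUnitsCommand({ QueryId: queryId, NextToken: nextToken })
    );
    for (const range of page.WorkUnitRanges ?? []) {
      // Work unit IDs are the inclusive range WorkUnitIdMin..WorkUnitIdMax.
      for (let id = range.WorkUnitIdMin!; id <= range.WorkUnitIdMax!; id++) {
        const result = await client.send(
          new GetWorkUnitResultsCommand({
            QueryId: queryId,
            WorkUnitId: id,
            WorkUnitToken: range.WorkUnitToken!, // token paired with this range
          })
        );
        // result.ResultStream carries Apache Arrow IPC messages; decode with an Arrow reader.
        console.log(`work unit ${id} returned a result stream:`, result.ResultStream !== undefined);
      }
    }
    nextToken = page.NextToken;
  } while (nextToken);
}

fetchAllWorkUnitResults("00000000-0000-0000-0000-000000000000").catch(console.error);
```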

                                                                      ", + "smithy.api#required": {} + } + }, + "WorkUnitToken": { + "target": "com.amazonaws.lakeformation#SyntheticGetWorkUnitResultsRequestWorkUnitTokenString", + "traits": { + "smithy.api#documentation": "

                                                                      A work token used to query the execution service. Token output from GetWorkUnits.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#GetWorkUnitResultsRequestQueryIdString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + } + } + }, + "com.amazonaws.lakeformation#GetWorkUnitResultsRequestWorkUnitIdLong": { + "type": "long", + "traits": { + "smithy.api#range": { + "min": 0 + } + } + }, + "com.amazonaws.lakeformation#GetWorkUnitResultsResponse": { + "type": "structure", + "members": { + "ResultStream": { + "target": "com.amazonaws.lakeformation#ResultStream", + "traits": { + "smithy.api#documentation": "

                                                                      Rows returned from the GetWorkUnitResults operation as a stream of Apache Arrow v1.0 messages.

                                                                      ", + "smithy.api#httpPayload": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure for the output.

                                                                      " + } + }, + "com.amazonaws.lakeformation#GetWorkUnits": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#GetWorkUnitsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#GetWorkUnitsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#ExpiredException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#WorkUnitsNotReadyYetException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves the work units generated by the StartQueryPlanning operation.

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "query-" + }, + "smithy.api#http": { + "method": "POST", + "uri": "/GetWorkUnits", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "PageSize" + } + } + }, + "com.amazonaws.lakeformation#GetWorkUnitsRequest": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token, if this is a continuation call.

                                                                      " + } + }, + "PageSize": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

                                                                      The size of each page to get in the Amazon Web Services service call. This does not affect the number of items returned in the command's output. Setting a smaller page size results in more calls to the Amazon Web Services service, retrieving fewer items in each call. This can help prevent the Amazon Web Services service calls from timing out.

                                                                      " + } + }, + "QueryId": { + "target": "com.amazonaws.lakeformation#GetWorkUnitsRequestQueryIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the plan query operation.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#GetWorkUnitsRequestQueryIdString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + } + } + }, + "com.amazonaws.lakeformation#GetWorkUnitsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token for paginating the returned list of tokens, returned if the current segment of the list is not the last.

                                                                      " + } + }, + "QueryId": { + "target": "com.amazonaws.lakeformation#QueryIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the plan query operation.

                                                                      ", + "smithy.api#required": {} + } + }, + "WorkUnitRanges": { + "target": "com.amazonaws.lakeformation#WorkUnitRangeList", + "traits": { + "smithy.api#documentation": "

                                                                      A WorkUnitRangeList object that specifies the valid range of work unit IDs for querying the execution service.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure for the output.

                                                                      " + } + }, + "com.amazonaws.lakeformation#GlueEncryptionException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the problem.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An encryption operation failed.

                                                                      ", + "smithy.api#error": "client" + } + }, + "com.amazonaws.lakeformation#GrantPermissions": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#GrantPermissionsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#GrantPermissionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Grants permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.


                                                                      For information about permissions, see Security and Access Control to Metadata and Data.
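A minimal sketch of a grant with the generated `GrantPermissionsCommand`; the IAM role ARN and table names are placeholders, and `SELECT` is one of the standard permission values.

```ts
import { LakeFormationClient, GrantPermissionsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function grantSelectOnTable(): Promise<void> {
  await client.send(
    new GrantPermissionsCommand({
      Principal: {
        // Placeholder IAM role ARN; IAM users and roles are supported principals.
        DataLakePrincipalIdentifier: "arn:aws:iam::111122223333:role/AnalystRole",
      },
      Resource: {
        Table: { DatabaseName: "sales_db", Name: "orders" }, // placeholder table
      },
      Permissions: ["SELECT"],
      // Optionally allow the principal to re-grant a subset of these permissions:
      // PermissionsWithGrantOption: ["SELECT"],
    })
  );
}

grantSelectOnTable().catch(console.error);
```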

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/GrantPermissions", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#GrantPermissionsRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " + } + }, + "Principal": { + "target": "com.amazonaws.lakeformation#DataLakePrincipal", + "traits": { + "smithy.api#documentation": "

                                                                      The principal to be granted the permissions on the resource. Supported principals are IAM users or IAM roles, and they are defined by their principal type and their ARN.


                                                                      Note that if you define a resource with a particular ARN, then later delete, and recreate a resource with that same ARN, the resource maintains the permissions already granted.

                                                                      ", + "smithy.api#required": {} + } + }, + "Resource": { + "target": "com.amazonaws.lakeformation#Resource", + "traits": { + "smithy.api#documentation": "

                                                                      The resource to which permissions are to be granted. Resources in Lake Formation are the Data Catalog, databases, and tables.

                                                                      ", + "smithy.api#required": {} + } + }, + "Permissions": { + "target": "com.amazonaws.lakeformation#PermissionList", + "traits": { + "smithy.api#documentation": "

                                                                      The permissions granted to the principal on the resource. Lake Formation defines privileges to grant and revoke access to metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3. Lake Formation requires that each principal be authorized to perform a specific task on Lake Formation resources.

                                                                      ", + "smithy.api#required": {} + } + }, + "PermissionsWithGrantOption": { + "target": "com.amazonaws.lakeformation#PermissionList", + "traits": { + "smithy.api#documentation": "

                                                                      Indicates a list of the granted permissions that the principal may pass to other users. These permissions may only be a subset of the permissions granted in the Privileges.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#GrantPermissionsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.lakeformation#IAMRoleArn": { + "type": "string", + "traits": { + "smithy.api#pattern": "^arn:aws:iam::[0-9]*:role/" + } + }, + "com.amazonaws.lakeformation#Identifier": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + } + } + }, + "com.amazonaws.lakeformation#InternalServiceException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the problem.

                                                                      " } } }, "traits": { "smithy.api#documentation": "

                                                                      An internal service error occurred.

                                                                      ", - "smithy.api#error": "server" + "smithy.api#error": "server", + "smithy.api#httpError": 500 } }, "com.amazonaws.lakeformation#InvalidInputException": { @@ -1400,7 +2647,8 @@ }, "traits": { "smithy.api#documentation": "

                                                                      The input provided was not valid.

                                                                      ", - "smithy.api#error": "client" + "smithy.api#error": "client", + "smithy.api#httpError": 400 } }, "com.amazonaws.lakeformation#LFTag": { @@ -1409,7 +2657,7 @@ "TagKey": { "target": "com.amazonaws.lakeformation#LFTagKey", "traits": { - "smithy.api#documentation": "

                                                                      The key-name for the tag.

                                                                      ", + "smithy.api#documentation": "

                                                                      The key-name for the LF-tag.

                                                                      ", "smithy.api#required": {} } }, @@ -1422,7 +2670,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure that allows an admin to grant user permissions on certain conditions. For example, granting a role access to all columns not tagged 'PII' of tables tagged 'Prod'.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure that allows an admin to grant user permissions on certain conditions. For example, granting a role access to all columns that do not have the LF-tag 'PII' in tables that have the LF-tag 'Prod'.
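To make the LF-tag condition concrete, here is a hedged sketch that grants `SELECT` on every table whose LF-tags satisfy an expression, using the `Resource` union's `LFTagPolicy` member (defined elsewhere in this model); the tag key and value are invented for illustration.

```ts
import { LakeFormationClient, GrantPermissionsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function grantByLFTag(): Promise<void> {
  await client.send(
    new GrantPermissionsCommand({
      Principal: { DataLakePrincipalIdentifier: "arn:aws:iam::111122223333:role/AnalystRole" },
      Resource: {
        // The grant applies to all TABLE resources whose LF-tags satisfy the expression.
        LFTagPolicy: {
          ResourceType: "TABLE",
          Expression: [{ TagKey: "environment", TagValues: ["prod"] }], // hypothetical LF-tag
        },
      },
      Permissions: ["SELECT"],
    })
  );
}

grantByLFTag().catch(console.error);
```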

                                                                      " } }, "com.amazonaws.lakeformation#LFTagError": { @@ -1431,13 +2679,13 @@ "LFTag": { "target": "com.amazonaws.lakeformation#LFTagPair", "traits": { - "smithy.api#documentation": "

                                                                      The key-name of the tag.

                                                                      " + "smithy.api#documentation": "

                                                                      The key-name of the LF-tag.

                                                                      " } }, "Error": { "target": "com.amazonaws.lakeformation#ErrorDetail", "traits": { - "smithy.api#documentation": "

                                                                      An error that occurred with the attachment or detachment of the tag.

                                                                      " + "smithy.api#documentation": "

                                                                      An error that occurred with the attachment or detachment of the LF-tag.

                                                                      " } } }, @@ -1467,13 +2715,13 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "TagKey": { "target": "com.amazonaws.lakeformation#NameString", "traits": { - "smithy.api#documentation": "

                                                                      The key-name for the tag.

                                                                      ", + "smithy.api#documentation": "

                                                                      The key-name for the LF-tag.

                                                                      ", "smithy.api#required": {} } }, @@ -1486,7 +2734,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure containing a tag key and values for a resource.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure containing an LF-tag key and values for a resource.

                                                                      " } }, "com.amazonaws.lakeformation#LFTagPair": { @@ -1495,13 +2743,13 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "TagKey": { "target": "com.amazonaws.lakeformation#LFTagKey", "traits": { - "smithy.api#documentation": "

                                                                      The key-name for the tag.

                                                                      ", + "smithy.api#documentation": "

                                                                      The key-name for the LF-tag.

                                                                      ", "smithy.api#required": {} } }, @@ -1514,7 +2762,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure containing a tag key-value pair.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure containing an LF-tag key-value pair.

                                                                      " } }, "com.amazonaws.lakeformation#LFTagPolicyResource": { @@ -1523,26 +2771,26 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "ResourceType": { "target": "com.amazonaws.lakeformation#ResourceType", "traits": { - "smithy.api#documentation": "

                                                                      The resource type for which the tag policy applies.

                                                                      ", + "smithy.api#documentation": "

                                                                      The resource type for which the LF-tag policy applies.

                                                                      ", "smithy.api#required": {} } }, "Expression": { "target": "com.amazonaws.lakeformation#Expression", "traits": { - "smithy.api#documentation": "

                                                                      A list of tag conditions that apply to the resource's tag policy.

                                                                      ", + "smithy.api#documentation": "

                                                                      A list of LF-tag conditions that apply to the resource's LF-tag policy.

                                                                      ", "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure containing a list of tag conditions that apply to a resource's tag policy.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure containing a list of LF-tag conditions that apply to a resource's LF-tag policy.

                                                                      " } }, "com.amazonaws.lakeformation#LFTagValue": { @@ -1570,17 +2818,17 @@ "com.amazonaws.lakeformation#LastModifiedTimestamp": { "type": "timestamp" }, - "com.amazonaws.lakeformation#ListLFTags": { + "com.amazonaws.lakeformation#ListDataCellsFilter": { "type": "operation", "input": { - "target": "com.amazonaws.lakeformation#ListLFTagsRequest" + "target": "com.amazonaws.lakeformation#ListDataCellsFilterRequest" }, "output": { - "target": "com.amazonaws.lakeformation#ListLFTagsResponse" + "target": "com.amazonaws.lakeformation#ListDataCellsFilterResponse" }, "errors": [ { - "target": "com.amazonaws.lakeformation#EntityNotFoundException" + "target": "com.amazonaws.lakeformation#AccessDeniedException" }, { "target": "com.amazonaws.lakeformation#InternalServiceException" @@ -1593,64 +2841,75 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Lists tags that the requester has permission to view.

                                                                      " + "smithy.api#documentation": "

                                                                      Lists all the data cell filters on a table.
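A short sketch of listing the filters on one table with the generated `ListDataCellsFilterCommand`; the Glue Data Catalog coordinates are placeholders.

```ts
import { LakeFormationClient, ListDataCellsFilterCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listFiltersOnTable(): Promise<void> {
  const response = await client.send(
    new ListDataCellsFilterCommand({
      Table: { DatabaseName: "sales_db", Name: "orders" }, // placeholder table
      MaxResults: 25,
    })
  );
  console.log("data cell filters:", response.DataCellsFilters);
  if (response.NextToken) {
    console.log("more filters available; pass NextToken to the next call");
  }
}

listFiltersOnTable().catch(console.error);
```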

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/ListDataCellsFilter", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "DataCellsFilters", + "pageSize": "MaxResults" + } } }, - "com.amazonaws.lakeformation#ListLFTagsRequest": { + "com.amazonaws.lakeformation#ListDataCellsFilterRequest": { "type": "structure", "members": { - "CatalogId": { - "target": "com.amazonaws.lakeformation#CatalogIdString", + "Table": { + "target": "com.amazonaws.lakeformation#TableResource", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      A table in the Glue Data Catalog.

                                                                      " } }, - "ResourceShareType": { - "target": "com.amazonaws.lakeformation#ResourceShareType", + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", "traits": { - "smithy.api#documentation": "

                                                                      If resource share type is ALL, returns both in-account tags and shared tags that the requester has permission to view. If resource share type is FOREIGN, returns all share tags that the requester can view. If no resource share type is passed, lists tags in the given catalog ID that the requester has permission to view.

                                                                      " + "smithy.api#documentation": "

                                                                      A continuation token, if this is a continuation call.

                                                                      " } }, "MaxResults": { "target": "com.amazonaws.lakeformation#PageSize", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of results to return.

                                                                      " - } - }, - "NextToken": { - "target": "com.amazonaws.lakeformation#Token", - "traits": { - "smithy.api#documentation": "

                                                                      A continuation token, if this is not the first call to retrieve this list.

                                                                      " + "smithy.api#documentation": "

                                                                      The maximum size of the response.

                                                                      " } } } }, - "com.amazonaws.lakeformation#ListLFTagsResponse": { + "com.amazonaws.lakeformation#ListDataCellsFilterResponse": { "type": "structure", "members": { - "LFTags": { - "target": "com.amazonaws.lakeformation#LFTagsList", + "DataCellsFilters": { + "target": "com.amazonaws.lakeformation#DataCellsFilterList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tags that the requested has permission to view.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of DataCellFilter structures.

                                                                      " } }, "NextToken": { "target": "com.amazonaws.lakeformation#Token", "traits": { - "smithy.api#documentation": "

                                                                      A continuation token, present if the current list segment is not the last.

                                                                      " + "smithy.api#documentation": "

                                                                      A continuation token, if not all requested data cell filters have been returned.

                                                                      " } } } }, - "com.amazonaws.lakeformation#ListPermissions": { + "com.amazonaws.lakeformation#ListLFTags": { "type": "operation", "input": { - "target": "com.amazonaws.lakeformation#ListPermissionsRequest" + "target": "com.amazonaws.lakeformation#ListLFTagsRequest" }, "output": { - "target": "com.amazonaws.lakeformation#ListPermissionsResponse" + "target": "com.amazonaws.lakeformation#ListLFTagsResponse" }, "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, { "target": "com.amazonaws.lakeformation#InternalServiceException" }, @@ -1662,32 +2921,117 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns a list of the principal permissions on the resource, filtered by the permissions of the caller. For example, if you are granted an ALTER permission, you are able to see only the principal permissions for ALTER.


                                                                      This operation returns only those permissions that have been explicitly granted.


                                                                      For information about permissions, see Security and Access Control to Metadata and Data.

                                                                      ", + "smithy.api#documentation": "

                                                                      Lists LF-tags that the requester has permission to view.
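A sketch of draining this list with a manual `NextToken` loop (a generated paginator helper could be used instead); the page size is arbitrary.

```ts
import { LakeFormationClient, ListLFTagsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listAllLFTags(): Promise<void> {
  let nextToken: string | undefined;
  do {
    const page = await client.send(
      new ListLFTagsCommand({
        ResourceShareType: "ALL", // include shared LF-tags the caller can view
        MaxResults: 50,
        NextToken: nextToken,
      })
    );
    for (const tag of page.LFTags ?? []) {
      console.log(tag.TagKey, tag.TagValues);
    }
    nextToken = page.NextToken;
  } while (nextToken);
}

listAllLFTags().catch(console.error);
```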

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/ListLFTags", + "code": 200 + }, "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", + "items": "LFTags", "pageSize": "MaxResults" } } }, - "com.amazonaws.lakeformation#ListPermissionsRequest": { + "com.amazonaws.lakeformation#ListLFTagsRequest": { "type": "structure", "members": { "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, - "Principal": { - "target": "com.amazonaws.lakeformation#DataLakePrincipal", + "ResourceShareType": { + "target": "com.amazonaws.lakeformation#ResourceShareType", "traits": { - "smithy.api#documentation": "

                                                                      Specifies a principal to filter the permissions returned.

                                                                      " + "smithy.api#documentation": "

                                                                      If resource share type is ALL, returns both in-account LF-tags and shared LF-tags that the requester has permission to view. If resource share type is FOREIGN, returns all share LF-tags that the requester can view. If no resource share type is passed, lists LF-tags in the given catalog ID that the requester has permission to view.

                                                                      " } }, - "ResourceType": { - "target": "com.amazonaws.lakeformation#DataLakeResourceType", - "traits": { + "MaxResults": { + "target": "com.amazonaws.lakeformation#PageSize", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token, if this is not the first call to retrieve this list.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#ListLFTagsResponse": { + "type": "structure", + "members": { + "LFTags": { + "target": "com.amazonaws.lakeformation#LFTagsList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of LF-tags that the requester has permission to view.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token, present if the current list segment is not the last.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#ListPermissions": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#ListPermissionsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#ListPermissionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns a list of the principal permissions on the resource, filtered by the permissions of the caller. For example, if you are granted an ALTER permission, you are able to see only the principal permissions for ALTER.


                                                                      This operation returns only those permissions that have been explicitly granted.


                                                                      For information about permissions, see Security and Access Control to Metadata and Data.
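A hedged sketch that filters the listing to a single principal and resource type; the role ARN is a placeholder, and the response is logged whole because its members are not shown in this hunk.

```ts
import { LakeFormationClient, ListPermissionsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listPermissionsForRole(): Promise<void> {
  const response = await client.send(
    new ListPermissionsCommand({
      Principal: {
        DataLakePrincipalIdentifier: "arn:aws:iam::111122223333:role/AnalystRole", // placeholder
      },
      ResourceType: "TABLE",
      MaxResults: 50,
    })
  );
  console.log(response); // explicitly granted permissions, plus a NextToken when more pages exist
}

listPermissionsForRole().catch(console.error);
```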

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/ListPermissions", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.lakeformation#ListPermissionsRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " + } + }, + "Principal": { + "target": "com.amazonaws.lakeformation#DataLakePrincipal", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies a principal to filter the permissions returned.

                                                                      " + } + }, + "ResourceType": { + "target": "com.amazonaws.lakeformation#DataLakeResourceType", + "traits": { "smithy.api#documentation": "

                                                                      Specifies a resource type to filter the permissions returned.

                                                                      " } }, @@ -1708,6 +3052,12 @@ "traits": { "smithy.api#documentation": "

                                                                      The maximum number of results to return.

                                                                      " } + }, + "IncludeRelated": { + "target": "com.amazonaws.lakeformation#TrueFalseString", + "traits": { + "smithy.api#documentation": "

                                                                      Indicates that related permissions should be included in the results.

                                                                      " + } } } }, @@ -1749,6 +3099,11 @@ ], "traits": { "smithy.api#documentation": "

                                                                      Lists the resources registered to be managed by the Data Catalog.
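A minimal sketch of the call; the request members are optional, so an empty input lists everything the caller can see, and the raw response is logged because its shape is not shown in this hunk.

```ts
import { LakeFormationClient, ListResourcesCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listRegisteredResources(): Promise<void> {
  const response = await client.send(new ListResourcesCommand({}));
  console.log(response); // registered resources, plus a NextToken when more pages exist
}

listRegisteredResources().catch(console.error);
```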

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/ListResources", + "code": 200 + }, "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -1796,6 +3151,181 @@ } } }, + "com.amazonaws.lakeformation#ListTableStorageOptimizers": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#ListTableStorageOptimizersRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#ListTableStorageOptimizersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns the configuration of all storage optimizers associated with a specified table.
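A sketch of listing the compaction optimizer configuration for one governed table; the table names are placeholders and the `COMPACTION` enum spelling is an assumption based on the description above.

```ts
import { LakeFormationClient, ListTableStorageOptimizersCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" }); // placeholder region

async function listCompactionOptimizers(): Promise<void> {
  const response = await client.send(
    new ListTableStorageOptimizersCommand({
      DatabaseName: "sales_db",           // placeholder governed table
      TableName: "orders",
      StorageOptimizerType: "COMPACTION", // assumed enum value for the supported "compaction" type
    })
  );
  console.log(response.StorageOptimizerList);
}

listCompactionOptimizers().catch(console.error);
```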

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/ListTableStorageOptimizers", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.lakeformation#ListTableStorageOptimizersRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The Catalog ID of the table.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      Name of the database where the table is present.

                                                                      ", + "smithy.api#required": {} + } + }, + "TableName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      Name of the table.

                                                                      ", + "smithy.api#required": {} + } + }, + "StorageOptimizerType": { + "target": "com.amazonaws.lakeformation#OptimizerType", + "traits": { + "smithy.api#documentation": "

                                                                      The specific type of storage optimizers to list. The supported value is compaction.

                                                                      " + } + }, + "MaxResults": { + "target": "com.amazonaws.lakeformation#PageSize", + "traits": { + "smithy.api#documentation": "

                                                                      The number of storage optimizers to return on each call.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token, if this is a continuation call.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#ListTableStorageOptimizersResponse": { + "type": "structure", + "members": { + "StorageOptimizerList": { + "target": "com.amazonaws.lakeformation#StorageOptimizerList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of the storage optimizers associated with a table.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#Token", + "traits": { + "smithy.api#documentation": "

A continuation token for paginating the returned list of storage optimizers, returned if the current segment of the list is not the last.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#ListTransactions": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#ListTransactionsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#ListTransactionsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns metadata about transactions and their status. To prevent the response from growing indefinitely, only uncommitted transactions and those available for time-travel queries are returned.

                                                                      \n\t

This operation can help you identify uncommitted transactions or get information about transactions.
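As a sketch only (not part of the model), assuming the generated client exports ListTransactionsCommand matching the request shape below:

```ts
import { LakeFormationClient, ListTransactionsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Find transactions that are still active so they can be committed or cancelled.
async function listActiveTransactions() {
  const { Transactions } = await client.send(
    new ListTransactionsCommand({ StatusFilter: "ACTIVE", MaxResults: 25 })
  );
  return Transactions ?? [];
}
```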

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/ListTransactions", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.lakeformation#ListTransactionsRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The catalog for which to list transactions. Defaults to the account ID of the caller.

                                                                      " + } + }, + "StatusFilter": { + "target": "com.amazonaws.lakeformation#TransactionStatusFilter", + "traits": { + "smithy.api#documentation": "

                                                                      A filter indicating the status of transactions to return. Options are ALL | COMPLETED | COMMITTED | ABORTED | ACTIVE. The default is ALL.

                                                                      " + } + }, + "MaxResults": { + "target": "com.amazonaws.lakeformation#PageSize", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of transactions to return in a single call.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#TokenString", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token if this is not the first call to retrieve transactions.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#ListTransactionsResponse": { + "type": "structure", + "members": { + "Transactions": { + "target": "com.amazonaws.lakeformation#TransactionDescriptionList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of transactions. The record for each transaction is a TransactionDescription object.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.lakeformation#TokenString", + "traits": { + "smithy.api#documentation": "

                                                                      A continuation token indicating whether additional data is available.

                                                                      " + } + } + } + }, "com.amazonaws.lakeformation#MessageString": { "type": "string" }, @@ -1806,7 +3336,7 @@ "min": 1, "max": 255 }, - "smithy.api#pattern": "[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*" + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" } }, "com.amazonaws.lakeformation#NullableBoolean": { @@ -1815,6 +3345,18 @@ "smithy.api#box": {} } }, + "com.amazonaws.lakeformation#NumberOfBytes": { + "type": "long" + }, + "com.amazonaws.lakeformation#NumberOfItems": { + "type": "long" + }, + "com.amazonaws.lakeformation#NumberOfMilliseconds": { + "type": "long" + }, + "com.amazonaws.lakeformation#ObjectSize": { + "type": "long" + }, "com.amazonaws.lakeformation#OperationTimeoutException": { "type": "structure", "members": { @@ -1830,6 +3372,25 @@ "smithy.api#error": "client" } }, + "com.amazonaws.lakeformation#OptimizerType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "COMPACTION", + "name": "COMPACTION" + }, + { + "value": "GARBAGE_COLLECTION", + "name": "GARBAGE_COLLECTION" + }, + { + "value": "ALL", + "name": "GENERIC" + } + ] + } + }, "com.amazonaws.lakeformation#PageSize": { "type": "integer", "traits": { @@ -1840,25 +3401,72 @@ } } }, - "com.amazonaws.lakeformation#Permission": { + "com.amazonaws.lakeformation#PartitionObjects": { + "type": "structure", + "members": { + "PartitionValues": { + "target": "com.amazonaws.lakeformation#PartitionValuesList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of partition values.

                                                                      " + } + }, + "Objects": { + "target": "com.amazonaws.lakeformation#TableObjectList", + "traits": { + "smithy.api#documentation": "

A list of table objects.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing a list of partition values and table objects.

                                                                      " + } + }, + "com.amazonaws.lakeformation#PartitionValueString": { "type": "string", "traits": { - "smithy.api#enum": [ - { - "value": "ALL", - "name": "ALL" - }, - { - "value": "SELECT", - "name": "SELECT" - }, - { - "value": "ALTER", - "name": "ALTER" - }, - { - "value": "DROP", - "name": "DROP" + "smithy.api#length": { + "min": 0, + "max": 1024 + } + } + }, + "com.amazonaws.lakeformation#PartitionValuesList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#PartitionValueString" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.lakeformation#PartitionedTableObjectsList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#PartitionObjects" + } + }, + "com.amazonaws.lakeformation#Permission": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "SELECT", + "name": "SELECT" + }, + { + "value": "ALTER", + "name": "ALTER" + }, + { + "value": "DROP", + "name": "DROP" }, { "value": "DELETE", @@ -1913,6 +3521,48 @@ "target": "com.amazonaws.lakeformation#Permission" } }, + "com.amazonaws.lakeformation#PlanningStatistics": { + "type": "structure", + "members": { + "EstimatedDataToScanBytes": { + "target": "com.amazonaws.lakeformation#NumberOfBytes", + "traits": { + "smithy.api#documentation": "

                                                                      An estimate of the data that was scanned in bytes.

                                                                      " + } + }, + "PlanningTimeMillis": { + "target": "com.amazonaws.lakeformation#NumberOfMilliseconds", + "traits": { + "smithy.api#documentation": "

                                                                      The time that it took to process the request.

                                                                      " + } + }, + "QueueTimeMillis": { + "target": "com.amazonaws.lakeformation#NumberOfMilliseconds", + "traits": { + "smithy.api#documentation": "

                                                                      The time the request was in queue to be processed.

                                                                      " + } + }, + "WorkUnitsGeneratedCount": { + "target": "com.amazonaws.lakeformation#NumberOfItems", + "traits": { + "smithy.api#documentation": "

                                                                      The number of work units generated.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Statistics related to the processing of a query statement.

                                                                      " + } + }, + "com.amazonaws.lakeformation#PredicateString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 2048 + }, + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" + } + }, "com.amazonaws.lakeformation#PrincipalPermissions": { "type": "structure", "members": { @@ -2000,7 +3650,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Sets the list of data lake administrators who have admin privileges on all resources managed by Lake Formation. For more information on admin privileges, see Granting Lake Formation Permissions.

                                                                      \n\t\n\t

                                                                      This API replaces the current list of data lake admins with the new list being passed. To add an admin, fetch the current list and add the new admin to that list and pass that list in this API.

                                                                      " + "smithy.api#documentation": "

                                                                      Sets the list of data lake administrators who have admin privileges on all resources managed by Lake Formation. For more information on admin privileges, see Granting Lake Formation Permissions.

                                                                      \n\t\n\t

This API replaces the current list of data lake admins with the new list being passed. To add an admin, fetch the current list, add the new admin to it, and pass the updated list to this API.
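Because the call replaces the whole admin list, a read-modify-write flow is the usual pattern. A minimal sketch (not part of the model), assuming the generated client exports GetDataLakeSettingsCommand and PutDataLakeSettingsCommand; the admin ARN is a placeholder.

```ts
import {
  LakeFormationClient,
  GetDataLakeSettingsCommand,
  PutDataLakeSettingsCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Add one admin without dropping the existing ones: fetch, append, then put the full list back.
async function addDataLakeAdmin(adminArn: string) {
  const { DataLakeSettings } = await client.send(new GetDataLakeSettingsCommand({}));
  const admins = DataLakeSettings?.DataLakeAdmins ?? [];
  admins.push({ DataLakePrincipalIdentifier: adminArn });
  await client.send(
    new PutDataLakeSettingsCommand({
      DataLakeSettings: { ...DataLakeSettings, DataLakeAdmins: admins },
    })
  );
}
```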

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/PutDataLakeSettings", + "code": 200 + } } }, "com.amazonaws.lakeformation#PutDataLakeSettingsRequest": { @@ -2009,13 +3664,13 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "DataLakeSettings": { "target": "com.amazonaws.lakeformation#DataLakeSettings", "traits": { - "smithy.api#documentation": "

                                                                      A structure representing a list of AWS Lake Formation principals designated as data lake administrators.

                                                                      ", + "smithy.api#documentation": "

                                                                      A structure representing a list of Lake Formation principals designated as data lake administrators.

                                                                      ", "smithy.api#required": {} } } @@ -2025,6 +3680,93 @@ "type": "structure", "members": {} }, + "com.amazonaws.lakeformation#QueryIdString": { + "type": "string" + }, + "com.amazonaws.lakeformation#QueryParameterMap": { + "type": "map", + "key": { + "target": "smithy.api#String" + }, + "value": { + "target": "smithy.api#String" + } + }, + "com.amazonaws.lakeformation#QueryPlanningContext": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the Data Catalog where the partition in question resides. If none is provided, the Amazon Web Services account ID is used by default.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#QueryPlanningContextDatabaseNameString", + "traits": { + "smithy.api#documentation": "

                                                                      The database containing the table.

                                                                      ", + "smithy.api#required": {} + } + }, + "QueryAsOfTime": { + "target": "com.amazonaws.lakeformation#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The time as of when to read the table contents. If not set, the most recent transaction commit time will be used. Cannot be specified along with TransactionId.

                                                                      " + } + }, + "QueryParameters": { + "target": "com.amazonaws.lakeformation#QueryParameterMap", + "traits": { + "smithy.api#documentation": "

                                                                      A map consisting of key-value pairs.

                                                                      " + } + }, + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The transaction ID at which to read the table contents. If this transaction is not committed, the read will be treated as part of that transaction and will see its writes. If this transaction has aborted, an error will be returned. If not set, defaults to the most recent committed transaction. Cannot be specified along with QueryAsOfTime.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing information about the query plan.

                                                                      " + } + }, + "com.amazonaws.lakeformation#QueryPlanningContextDatabaseNameString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + }, + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" + } + }, + "com.amazonaws.lakeformation#QueryStateString": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "PENDING", + "name": "PENDING" + }, + { + "value": "WORKUNITS_AVAILABLE", + "name": "WORKUNITS_AVAILABLE" + }, + { + "value": "ERROR", + "name": "ERROR" + }, + { + "value": "FINISHED", + "name": "FINISHED" + }, + { + "value": "EXPIRED", + "name": "EXPIRED" + } + ] + } + }, "com.amazonaws.lakeformation#RAMResourceShareArn": { "type": "string" }, @@ -2037,9 +3779,15 @@ "target": "com.amazonaws.lakeformation#RegisterResourceResponse" }, "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, { "target": "com.amazonaws.lakeformation#AlreadyExistsException" }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, { "target": "com.amazonaws.lakeformation#InternalServiceException" }, @@ -2048,10 +3796,18 @@ }, { "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#ResourceNumberLimitExceededException" } ], "traits": { - "smithy.api#documentation": "

                                                                      Registers the resource as managed by the Data Catalog.

                                                                      \n\t\n\t

                                                                      To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.

                                                                      \n\n

                                                                      The following request registers a new location and gives AWS Lake Formation permission to use the service-linked role to access that location.

                                                                      \n\n

                                                                      \n ResourceArn = arn:aws:s3:::my-bucket\nUseServiceLinkedRole = true\n

                                                                      \n\t\n\t

                                                                      If UseServiceLinkedRole is not set to true, you must provide or set the RoleArn:

                                                                      \n\t\n

                                                                      \n arn:aws:iam::12345:role/my-data-access-role\n

                                                                      " + "smithy.api#documentation": "

                                                                      Registers the resource as managed by the Data Catalog.

                                                                      \n\t\n

                                                                      To add or update data, Lake Formation needs read/write access to the chosen Amazon S3 path. Choose a role that you know has permission to do this, or choose the AWSServiceRoleForLakeFormationDataAccess service-linked role. When you register the first Amazon S3 path, the service-linked role and a new inline policy are created on your behalf. Lake Formation adds the first path to the inline policy and attaches it to the service-linked role. When you register subsequent paths, Lake Formation adds the path to the existing policy.

                                                                      \n\n

                                                                      The following request registers a new location and gives Lake Formation permission to use the service-linked role to access that location.

                                                                      \n\n

                                                                      \n ResourceArn = arn:aws:s3:::my-bucket\nUseServiceLinkedRole = true\n

                                                                      \n\t\n\t

                                                                      If UseServiceLinkedRole is not set to true, you must provide or set the RoleArn:

                                                                      \n\t\n

                                                                      \n arn:aws:iam::12345:role/my-data-access-role\n
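The same request expressed as a hedged TypeScript sketch (not part of the model), assuming the generated client exports RegisterResourceCommand; the bucket and role ARNs are illustrative.

```ts
import { LakeFormationClient, RegisterResourceCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Register an S3 location using the service-linked role, as in the request above.
async function registerBucket() {
  await client.send(
    new RegisterResourceCommand({
      ResourceArn: "arn:aws:s3:::my-bucket",
      UseServiceLinkedRole: true,
      // Alternatively, omit UseServiceLinkedRole and pass an explicit role:
      // RoleArn: "arn:aws:iam::123456789012:role/my-data-access-role",
    })
  );
}
```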

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/RegisterResource", + "code": 200 + } } }, "com.amazonaws.lakeformation#RegisterResourceRequest": { @@ -2067,7 +3823,7 @@ "UseServiceLinkedRole": { "target": "com.amazonaws.lakeformation#NullableBoolean", "traits": { - "smithy.api#documentation": "

                                                                      Designates an AWS Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. A service-linked role is a unique type of IAM role that is linked directly to Lake Formation.

                                                                      \n \n

                                                                      For more information, see Using Service-Linked Roles for Lake Formation.

                                                                      " + "smithy.api#documentation": "

                                                                      Designates an Identity and Access Management (IAM) service-linked role by registering this role with the Data Catalog. A service-linked role is a unique type of IAM role that is linked directly to Lake Formation.

                                                                      \n \n

                                                                      For more information, see Using Service-Linked Roles for Lake Formation.

                                                                      " } }, "RoleArn": { @@ -2114,7 +3870,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Removes a tag from the resource. Only database, table, or tableWithColumns resource are allowed. To tag columns, use the column inclusion list in tableWithColumns to specify column input.

                                                                      " + "smithy.api#documentation": "

Removes an LF-tag from the resource. Only database, table, or tableWithColumns resources are allowed. To tag columns, use the column inclusion list in tableWithColumns to specify column input.
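A minimal sketch (not part of the model), assuming the generated client exports RemoveLFTagsFromResourceCommand; the database, table, and tag names are placeholders.

```ts
import { LakeFormationClient, RemoveLFTagsFromResourceCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Detach an LF-tag from a table resource.
async function removeEnvironmentTag() {
  await client.send(
    new RemoveLFTagsFromResourceCommand({
      Resource: { Table: { DatabaseName: "sales_db", Name: "orders" } },
      LFTags: [{ TagKey: "environment", TagValues: ["dev"] }],
    })
  );
}
```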

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/RemoveLFTagsFromResource", + "code": 200 + } } }, "com.amazonaws.lakeformation#RemoveLFTagsFromResourceRequest": { @@ -2123,20 +3884,20 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Resource": { "target": "com.amazonaws.lakeformation#Resource", "traits": { - "smithy.api#documentation": "

                                                                      The resource where you want to remove a tag.

                                                                      ", + "smithy.api#documentation": "

                                                                      The database, table, or column resource where you want to remove an LF-tag.

                                                                      ", "smithy.api#required": {} } }, "LFTags": { "target": "com.amazonaws.lakeformation#LFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      The tags to be removed from the resource.

                                                                      ", + "smithy.api#documentation": "

                                                                      The LF-tags to be removed from the resource.

                                                                      ", "smithy.api#required": {} } } @@ -2159,7 +3920,7 @@ "Catalog": { "target": "com.amazonaws.lakeformation#CatalogResource", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Database": { @@ -2186,16 +3947,22 @@ "smithy.api#documentation": "

                                                                      The location of an Amazon S3 path where permissions are granted or revoked.

                                                                      " } }, + "DataCellsFilter": { + "target": "com.amazonaws.lakeformation#DataCellsFilterResource", + "traits": { + "smithy.api#documentation": "

                                                                      A data cell filter.

                                                                      " + } + }, "LFTag": { "target": "com.amazonaws.lakeformation#LFTagKeyResource", "traits": { - "smithy.api#documentation": "

                                                                      The tag key and values attached to a resource.

                                                                      " + "smithy.api#documentation": "

                                                                      The LF-tag key and values attached to a resource.

                                                                      " } }, "LFTagPolicy": { "target": "com.amazonaws.lakeformation#LFTagPolicyResource", "traits": { - "smithy.api#documentation": "

                                                                      A list of tag conditions that define a resource's tag policy.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tag conditions that define a resource's LF-tag policy.

                                                                      " } } }, @@ -2229,7 +3996,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure containing information about an AWS Lake Formation resource.

                                                                      " + "smithy.api#documentation": "

A structure containing information about a Lake Formation resource.

                                                                      " } }, "com.amazonaws.lakeformation#ResourceInfoList": { @@ -2238,6 +4005,22 @@ "target": "com.amazonaws.lakeformation#ResourceInfo" } }, + "com.amazonaws.lakeformation#ResourceNotReadyException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an error related to a resource which is not ready for a transaction.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, "com.amazonaws.lakeformation#ResourceNumberLimitExceededException": { "type": "structure", "members": { @@ -2289,6 +4072,15 @@ ] } }, + "com.amazonaws.lakeformation#Result": { + "type": "string" + }, + "com.amazonaws.lakeformation#ResultStream": { + "type": "blob", + "traits": { + "smithy.api#streaming": {} + } + }, "com.amazonaws.lakeformation#RevokePermissions": { "type": "operation", "input": { @@ -2309,7 +4101,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Revokes permissions to the principal to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.

                                                                      " + "smithy.api#documentation": "

Revokes the principal's permissions to access metadata in the Data Catalog and data organized in underlying data storage such as Amazon S3.
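A hedged sketch (not part of the model), assuming the generated client exports RevokePermissionsCommand; the role ARN, database, and table are placeholders.

```ts
import { LakeFormationClient, RevokePermissionsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Revoke SELECT on a table from an IAM role.
async function revokeSelect(roleArn: string) {
  await client.send(
    new RevokePermissionsCommand({
      Principal: { DataLakePrincipalIdentifier: roleArn },
      Resource: { Table: { DatabaseName: "sales_db", Name: "orders" } },
      Permissions: ["SELECT"],
    })
  );
}
```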

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/RevokePermissions", + "code": 200 + } } }, "com.amazonaws.lakeformation#RevokePermissionsRequest": { @@ -2318,7 +4115,7 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Principal": { @@ -2354,6 +4151,26 @@ "type": "structure", "members": {} }, + "com.amazonaws.lakeformation#RowFilter": { + "type": "structure", + "members": { + "FilterExpression": { + "target": "com.amazonaws.lakeformation#PredicateString", + "traits": { + "smithy.api#documentation": "

                                                                      A filter expression.

                                                                      " + } + }, + "AllRowsWildcard": { + "target": "com.amazonaws.lakeformation#AllRowsWildcard", + "traits": { + "smithy.api#documentation": "

                                                                      A wildcard for all rows.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A PartiQL predicate.

                                                                      " + } + }, "com.amazonaws.lakeformation#SearchDatabasesByLFTags": { "type": "operation", "input": { @@ -2383,7 +4200,18 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      This operation allows a search on DATABASE resources by TagCondition. This operation is used by admins who want to grant user permissions on certain TagConditions. Before making a grant, the admin can use SearchDatabasesByTags to find all resources where the given TagConditions are valid to verify whether the returned resources can be shared.

                                                                      " + "smithy.api#documentation": "

This operation allows a search on DATABASE resources by TagCondition. This operation is used by admins who want to grant user permissions on certain TagConditions. Before making a grant, the admin can use SearchDatabasesByLFTags to find all resources where the given TagConditions are valid to verify whether the returned resources can be shared.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/SearchDatabasesByLFTags", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "DatabaseList", + "pageSize": "MaxResults" + } } }, "com.amazonaws.lakeformation#SearchDatabasesByLFTagsRequest": { @@ -2404,7 +4232,7 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Expression": { @@ -2428,7 +4256,7 @@ "DatabaseList": { "target": "com.amazonaws.lakeformation#DatabaseLFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of databases that meet the tag conditions.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of databases that meet the LF-tag conditions.

                                                                      " } } } @@ -2462,7 +4290,18 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      This operation allows a search on TABLE resources by LFTags. This will be used by admins who want to grant user permissions on certain LFTags. Before making a grant, the admin can use SearchTablesByLFTags to find all resources where the given LFTags are valid to verify whether the returned resources can be shared.

                                                                      " + "smithy.api#documentation": "

This operation allows a search on TABLE resources by LF-tags. This will be used by admins who want to grant user permissions on certain LF-tags. Before making a grant, the admin can use SearchTablesByLFTags to find all resources where the given LF-tags are valid to verify whether the returned resources can be shared.
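A minimal sketch (not part of the model), assuming the generated client exports SearchTablesByLFTagsCommand; the tag key and values are illustrative.

```ts
import { LakeFormationClient, SearchTablesByLFTagsCommand } from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Find every table carrying a given LF-tag before granting on that tag expression.
async function findTaggedTables() {
  const { TableList } = await client.send(
    new SearchTablesByLFTagsCommand({
      Expression: [{ TagKey: "classification", TagValues: ["public"] }],
      MaxResults: 50,
    })
  );
  return TableList ?? [];
}
```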

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/SearchTablesByLFTags", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "TableList", + "pageSize": "MaxResults" + } } }, "com.amazonaws.lakeformation#SearchTablesByLFTagsRequest": { @@ -2483,7 +4322,7 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "Expression": { @@ -2507,78 +4346,327 @@ "TableList": { "target": "com.amazonaws.lakeformation#TableLFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tables that meet the tag conditions.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of tables that meet the LF-tag conditions.

                                                                      " } } } }, - "com.amazonaws.lakeformation#StringValue": { - "type": "string" - }, - "com.amazonaws.lakeformation#StringValueList": { - "type": "list", - "member": { - "target": "com.amazonaws.lakeformation#StringValue" - } - }, - "com.amazonaws.lakeformation#TableLFTagsList": { - "type": "list", - "member": { - "target": "com.amazonaws.lakeformation#TaggedTable" + "com.amazonaws.lakeformation#StartQueryPlanning": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#StartQueryPlanningRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#StartQueryPlanningResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#ThrottledException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Submits a request to process a query statement.

                                                                      \n\t\n\t

                                                                      This operation generates work units that can be retrieved with the GetWorkUnits operation as soon as the query state is WORKUNITS_AVAILABLE or FINISHED.
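A hedged end-to-end sketch (not part of the model), assuming the generated client exports StartQueryPlanningCommand and GetQueryStateCommand as shaped here; the database name and query text are placeholders.

```ts
import {
  LakeFormationClient,
  StartQueryPlanningCommand,
  GetQueryStateCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Plan a PartiQL query against a governed table, then poll its state until
// work units can be fetched with GetWorkUnits/GetWorkUnitResults.
async function planQuery() {
  const { QueryId } = await client.send(
    new StartQueryPlanningCommand({
      QueryPlanningContext: { DatabaseName: "sales_db" },
      QueryString: "SELECT * FROM orders WHERE region = 'EU'",
    })
  );
  let state = "PENDING";
  while (state === "PENDING") {
    await new Promise((resolve) => setTimeout(resolve, 500)); // simple backoff
    const res = await client.send(new GetQueryStateCommand({ QueryId: QueryId! }));
    state = res.State ?? "ERROR";
  }
  return { QueryId, state }; // WORKUNITS_AVAILABLE, FINISHED, ERROR, or EXPIRED
}
```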

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "query-" + }, + "smithy.api#http": { + "method": "POST", + "uri": "/StartQueryPlanning", + "code": 200 + } } }, - "com.amazonaws.lakeformation#TableResource": { + "com.amazonaws.lakeformation#StartQueryPlanningRequest": { "type": "structure", "members": { - "CatalogId": { - "target": "com.amazonaws.lakeformation#CatalogIdString", + "QueryPlanningContext": { + "target": "com.amazonaws.lakeformation#QueryPlanningContext", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, it is the account ID of the caller.

                                                                      " - } - }, - "DatabaseName": { - "target": "com.amazonaws.lakeformation#NameString", - "traits": { - "smithy.api#documentation": "

                                                                      The name of the database for the table. Unique to a Data Catalog. A database is a set of associated table definitions organized into a logical group. You can Grant and Revoke database privileges to a principal.

                                                                      ", + "smithy.api#documentation": "

                                                                      A structure containing information about the query plan.

                                                                      ", "smithy.api#required": {} } }, - "Name": { - "target": "com.amazonaws.lakeformation#NameString", + "QueryString": { + "target": "com.amazonaws.lakeformation#SyntheticStartQueryPlanningRequestQueryString", "traits": { - "smithy.api#documentation": "

                                                                      The name of the table.

                                                                      " + "smithy.api#documentation": "

                                                                      A PartiQL query statement used as an input to the planner service.

                                                                      ", + "smithy.api#required": {} } - }, - "TableWildcard": { - "target": "com.amazonaws.lakeformation#TableWildcard", + } + } + }, + "com.amazonaws.lakeformation#StartQueryPlanningResponse": { + "type": "structure", + "members": { + "QueryId": { + "target": "com.amazonaws.lakeformation#QueryIdString", "traits": { - "smithy.api#documentation": "

                                                                      A wildcard object representing every table under a database.

                                                                      \n\n

                                                                      At least one of TableResource$Name or TableResource$TableWildcard is required.

                                                                      " + "smithy.api#documentation": "

The ID of the plan query operation. The ID can be used to fetch the actual work unit descriptors that are produced as the result of the operation, to get the query state, and as an input to the Execute operation.

                                                                      ", + "smithy.api#required": {} } } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure for the table object. A table is a metadata definition that represents your data. You can Grant and Revoke table privileges to a principal.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure for the output.

                                                                      " } }, - "com.amazonaws.lakeformation#TableWildcard": { - "type": "structure", - "members": {}, + "com.amazonaws.lakeformation#StartTransaction": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#StartTransactionRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#StartTransactionResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + } + ], "traits": { - "smithy.api#documentation": "

                                                                      A wildcard object representing every table under a database.

                                                                      " + "smithy.api#documentation": "

                                                                      Starts a new transaction and returns its transaction ID. Transaction IDs are opaque objects that you can use to identify a transaction.
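A minimal sketch (not part of the model), assuming the generated client exports StartTransactionCommand and CommitTransactionCommand; the work callback is a placeholder for governed-table operations run under the transaction ID.

```ts
import {
  LakeFormationClient,
  StartTransactionCommand,
  CommitTransactionCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({});

// Open a read/write transaction, run work under its ID, then commit it.
async function withTransaction(work: (transactionId: string) => Promise<void>) {
  const { TransactionId } = await client.send(
    new StartTransactionCommand({ TransactionType: "READ_AND_WRITE" })
  );
  await work(TransactionId!);
  await client.send(new CommitTransactionCommand({ TransactionId: TransactionId! }));
}
```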

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/StartTransaction", + "code": 200 + } } }, - "com.amazonaws.lakeformation#TableWithColumnsResource": { + "com.amazonaws.lakeformation#StartTransactionRequest": { "type": "structure", "members": { - "CatalogId": { - "target": "com.amazonaws.lakeformation#CatalogIdString", + "TransactionType": { + "target": "com.amazonaws.lakeformation#TransactionType", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, it is the account ID of the caller.

                                                                      " + "smithy.api#documentation": "

                                                                      Indicates whether this transaction should be read only or read and write. Writes made using a read-only transaction ID will be rejected. Read-only transactions do not need to be committed.

                                                                      " } - }, - "DatabaseName": { - "target": "com.amazonaws.lakeformation#NameString", - "traits": { + } + } + }, + "com.amazonaws.lakeformation#StartTransactionResponse": { + "type": "structure", + "members": { + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      An opaque identifier for the transaction.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#StatisticsNotReadyYetException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an error related to statistics not being ready.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 420 + } + }, + "com.amazonaws.lakeformation#StorageOptimizer": { + "type": "structure", + "members": { + "StorageOptimizerType": { + "target": "com.amazonaws.lakeformation#OptimizerType", + "traits": { + "smithy.api#documentation": "

                                                                      The specific type of storage optimizer. The supported value is compaction.

                                                                      " + } + }, + "Config": { + "target": "com.amazonaws.lakeformation#StorageOptimizerConfig", + "traits": { + "smithy.api#documentation": "

                                                                      A map of the storage optimizer configuration. Currently contains only one key-value pair: is_enabled indicates true or false for acceleration.

                                                                      " + } + }, + "ErrorMessage": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message that contains information about any error (if present).

                                                                      \n\t\n\t

                                                                      When an acceleration result has an enabled status, the error message is empty.

                                                                      \n\t

                                                                      When an acceleration result has a disabled status, the message describes an error or simply indicates \"disabled by the user\".

                                                                      " + } + }, + "Warnings": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message that contains information about any warnings (if present).

                                                                      " + } + }, + "LastRunDetails": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      When an acceleration result has an enabled status, contains the details of the last job run.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure describing the configuration and details of a storage optimizer.

                                                                      " + } + }, + "com.amazonaws.lakeformation#StorageOptimizerConfig": { + "type": "map", + "key": { + "target": "com.amazonaws.lakeformation#StorageOptimizerConfigKey" + }, + "value": { + "target": "com.amazonaws.lakeformation#StorageOptimizerConfigValue" + } + }, + "com.amazonaws.lakeformation#StorageOptimizerConfigKey": { + "type": "string" + }, + "com.amazonaws.lakeformation#StorageOptimizerConfigMap": { + "type": "map", + "key": { + "target": "com.amazonaws.lakeformation#OptimizerType" + }, + "value": { + "target": "com.amazonaws.lakeformation#StorageOptimizerConfig" + } + }, + "com.amazonaws.lakeformation#StorageOptimizerConfigValue": { + "type": "string" + }, + "com.amazonaws.lakeformation#StorageOptimizerList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#StorageOptimizer" + } + }, + "com.amazonaws.lakeformation#StringValue": { + "type": "string" + }, + "com.amazonaws.lakeformation#StringValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#StringValue" + } + }, + "com.amazonaws.lakeformation#SyntheticGetWorkUnitResultsRequestWorkUnitTokenString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.lakeformation#SyntheticStartQueryPlanningRequestQueryString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1 + }, + "smithy.api#sensitive": {} + } + }, + "com.amazonaws.lakeformation#TableLFTagsList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#TaggedTable" + } + }, + "com.amazonaws.lakeformation#TableObject": { + "type": "structure", + "members": { + "Uri": { + "target": "com.amazonaws.lakeformation#URI", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon S3 location of the object.

                                                                      " + } + }, + "ETag": { + "target": "com.amazonaws.lakeformation#ETagString", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon S3 ETag of the object. Returned by GetTableObjects for validation and used to identify changes to the underlying data.

                                                                      " + } + }, + "Size": { + "target": "com.amazonaws.lakeformation#ObjectSize", + "traits": { + "smithy.api#documentation": "

                                                                      The size of the Amazon S3 object in bytes.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the details of a governed table.

                                                                      " + } + }, + "com.amazonaws.lakeformation#TableObjectList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#TableObject" + } + }, + "com.amazonaws.lakeformation#TableResource": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, it is the account ID of the caller.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the database for the table. Unique to a Data Catalog. A database is a set of associated table definitions organized into a logical group. You can Grant and Revoke database privileges to a principal.

                                                                      ", + "smithy.api#required": {} + } + }, + "Name": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the table.

                                                                      " + } + }, + "TableWildcard": { + "target": "com.amazonaws.lakeformation#TableWildcard", + "traits": { + "smithy.api#documentation": "

                                                                      A wildcard object representing every table under a database.

                                                                      \n\n

                                                                      At least one of TableResource$Name or TableResource$TableWildcard is required.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure for the table object. A table is a metadata definition that represents your data. You can Grant and Revoke table privileges to a principal.

                                                                      " + } + }, + "com.amazonaws.lakeformation#TableWildcard": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "

                                                                      A wildcard object representing every table under a database.

                                                                      " + } + }, + "com.amazonaws.lakeformation#TableWithColumnsResource": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, it is the account ID of the caller.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { "smithy.api#documentation": "

                                                                      The name of the database for the table with columns resource. Unique to the Data Catalog. A database is a set of associated table definitions organized into a logical group. You can Grant and Revoke database privileges to a principal.

                                                                      ", "smithy.api#required": {} } @@ -2625,18 +4713,18 @@ "Database": { "target": "com.amazonaws.lakeformation#DatabaseResource", "traits": { - "smithy.api#documentation": "

                                                                      A database that has tags attached to it.

                                                                      " + "smithy.api#documentation": "

                                                                      A database that has LF-tags attached to it.

                                                                      " } }, "LFTags": { "target": "com.amazonaws.lakeformation#LFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tags attached to the database.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tags attached to the database.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure describing a database resource with tags.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure describing a database resource with LF-tags.

                                                                      " } }, "com.amazonaws.lakeformation#TaggedTable": { @@ -2645,41 +4733,253 @@ "Table": { "target": "com.amazonaws.lakeformation#TableResource", "traits": { - "smithy.api#documentation": "

                                                                      A table that has tags attached to it.

                                                                      " + "smithy.api#documentation": "

                                                                      A table that has LF-tags attached to it.

                                                                      " } }, "LFTagOnDatabase": { "target": "com.amazonaws.lakeformation#LFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tags attached to the database where the table resides.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tags attached to the database where the table resides.

                                                                      " } }, "LFTagsOnTable": { "target": "com.amazonaws.lakeformation#LFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tags attached to the table.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tags attached to the table.

                                                                      " } }, "LFTagsOnColumns": { "target": "com.amazonaws.lakeformation#ColumnLFTagsList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tags attached to columns in the table.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tags attached to columns in the table.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      A structure describing a table resource with tags.

                                                                      " + "smithy.api#documentation": "

                                                                      A structure describing a table resource with LF-tags.

                                                                      " } }, + "com.amazonaws.lakeformation#ThrottledException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an error where the query request was throttled.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": { + "throttling": true + } + } + }, + "com.amazonaws.lakeformation#Timestamp": { + "type": "timestamp" + }, "com.amazonaws.lakeformation#Token": { "type": "string" }, + "com.amazonaws.lakeformation#TokenString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 4096 + } + } + }, + "com.amazonaws.lakeformation#TransactionCanceledException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an error related to a transaction that was cancelled.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.lakeformation#TransactionCommitInProgressException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an error related to a transaction commit that was in progress.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.lakeformation#TransactionCommittedException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an error where the specified transaction has already been committed and cannot be used for UpdateTableObjects.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.lakeformation#TransactionDescription": { + "type": "structure", + "members": { + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the transaction.

                                                                      " + } + }, + "TransactionStatus": { + "target": "com.amazonaws.lakeformation#TransactionStatus", + "traits": { + "smithy.api#documentation": "

                                                                      A status of ACTIVE, COMMITTED, or ABORTED.

                                                                      " + } + }, + "TransactionStartTime": { + "target": "com.amazonaws.lakeformation#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The time when the transaction started.

                                                                      " + } + }, + "TransactionEndTime": { + "target": "com.amazonaws.lakeformation#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The time when the transaction committed or aborted, if it is not currently active.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about a transaction.

                                                                      " + } + }, + "com.amazonaws.lakeformation#TransactionDescriptionList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#TransactionDescription" + } + }, + "com.amazonaws.lakeformation#TransactionIdString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^[\\p{L}\\p{N}\\p{P}]*$" + } + }, + "com.amazonaws.lakeformation#TransactionStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "COMMITTED", + "name": "COMMITTED" + }, + { + "value": "ABORTED", + "name": "ABORTED" + }, + { + "value": "COMMIT_IN_PROGRESS", + "name": "COMMIT_IN_PROGRESS" + } + ] + } + }, + "com.amazonaws.lakeformation#TransactionStatusFilter": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "COMPLETED", + "name": "COMPLETED" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + }, + { + "value": "COMMITTED", + "name": "COMMITTED" + }, + { + "value": "ABORTED", + "name": "ABORTED" + } + ] + } + }, + "com.amazonaws.lakeformation#TransactionType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "READ_AND_WRITE", + "name": "READ_AND_WRITE" + }, + { + "value": "READ_ONLY", + "name": "READ_ONLY" + } + ] + } + }, + "com.amazonaws.lakeformation#TrueFalseString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + }, + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\t]*$" + } + }, "com.amazonaws.lakeformation#TrustedResourceOwners": { "type": "list", "member": { "target": "com.amazonaws.lakeformation#CatalogIdString" } }, + "com.amazonaws.lakeformation#URI": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": "^[\\u0020-\\uD7FF\\uE000-\\uFFFD\\uD800\\uDC00-\\uDBFF\\uDFFF\\r\\n\\t]*$" + } + }, "com.amazonaws.lakeformation#UpdateLFTag": { "type": "operation", "input": { @@ -2709,7 +5009,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Updates the list of possible values for the specified tag key. If the tag does not exist, the operation throws an EntityNotFoundException. The values in the delete key values will be deleted from list of possible values. If any value in the delete key values is attached to a resource, then API errors out with a 400 Exception - \"Update not allowed\". Untag the attribute before deleting the tag key's value.

                                                                      " + "smithy.api#documentation": "

Updates the list of possible values for the specified LF-tag key. If the LF-tag does not exist, the operation throws an EntityNotFoundException. The values in the delete key values will be deleted from the list of possible values. If any value in the delete key values is attached to a resource, the API errors out with a 400 Exception - \"Update not allowed\". Untag the attribute before deleting the LF-tag key's value.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/UpdateLFTag", + "code": 200 + } } }, "com.amazonaws.lakeformation#UpdateLFTagRequest": { @@ -2718,26 +5023,26 @@ "CatalogId": { "target": "com.amazonaws.lakeformation#CatalogIdString", "traits": { - "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your AWS Lake Formation environment.

                                                                      " + "smithy.api#documentation": "

                                                                      The identifier for the Data Catalog. By default, the account ID. The Data Catalog is the persistent metadata store. It contains database definitions, table definitions, and other control information to manage your Lake Formation environment.

                                                                      " } }, "TagKey": { "target": "com.amazonaws.lakeformation#LFTagKey", "traits": { - "smithy.api#documentation": "

                                                                      The key-name for the tag for which to add or delete values.

                                                                      ", + "smithy.api#documentation": "

                                                                      The key-name for the LF-tag for which to add or delete values.

                                                                      ", "smithy.api#required": {} } }, "TagValuesToDelete": { "target": "com.amazonaws.lakeformation#TagValueList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tag values to delete from the tag.

                                                                      " + "smithy.api#documentation": "

                                                                      A list of LF-tag values to delete from the LF-tag.

                                                                      " } }, "TagValuesToAdd": { "target": "com.amazonaws.lakeformation#TagValueList", "traits": { - "smithy.api#documentation": "

                                                                      A list of tag values to add from the tag.

                                                                      " + "smithy.api#documentation": "

A list of LF-tag values to add to the LF-tag.

                                                                      " } } } @@ -2769,7 +5074,12 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Updates the data access role used for vending access to the given (registered) resource in AWS Lake Formation.

                                                                      " + "smithy.api#documentation": "

                                                                      Updates the data access role used for vending access to the given (registered) resource in Lake Formation.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/UpdateResource", + "code": 200 + } } }, "com.amazonaws.lakeformation#UpdateResourceRequest": { @@ -2778,7 +5088,7 @@ "RoleArn": { "target": "com.amazonaws.lakeformation#IAMRoleArn", "traits": { - "smithy.api#documentation": "

                                                                      The new role to use for the given resource registered in AWS Lake Formation.

                                                                      ", + "smithy.api#documentation": "

                                                                      The new role to use for the given resource registered in Lake Formation.

                                                                      ", "smithy.api#required": {} } }, @@ -2794,6 +5104,291 @@ "com.amazonaws.lakeformation#UpdateResourceResponse": { "type": "structure", "members": {} + }, + "com.amazonaws.lakeformation#UpdateTableObjects": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#UpdateTableObjectsRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#UpdateTableObjectsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#ConcurrentModificationException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + }, + { + "target": "com.amazonaws.lakeformation#OperationTimeoutException" + }, + { + "target": "com.amazonaws.lakeformation#ResourceNotReadyException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCanceledException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCommitInProgressException" + }, + { + "target": "com.amazonaws.lakeformation#TransactionCommittedException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates the manifest of Amazon S3 objects that make up the specified governed table.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/UpdateTableObjects", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#UpdateTableObjectsRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The catalog containing the governed table to update. Defaults to the caller’s account ID.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      The database containing the governed table to update.

                                                                      ", + "smithy.api#required": {} + } + }, + "TableName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      The governed table to update.

                                                                      ", + "smithy.api#required": {} + } + }, + "TransactionId": { + "target": "com.amazonaws.lakeformation#TransactionIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The transaction at which to do the write.

                                                                      ", + "smithy.api#required": {} + } + }, + "WriteOperations": { + "target": "com.amazonaws.lakeformation#WriteOperationList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of WriteOperation objects that define an object to add to or delete from the manifest for a governed table.
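For orientation, here is a minimal sketch of how the UpdateTableObjects request shape above might be driven from the regenerated v3 client, assuming the @aws-sdk/client-lakeformation package produced by this change exposes StartTransactionCommand, UpdateTableObjectsCommand, and CommitTransactionCommand for the transaction operations in this model; the database, table, S3 URI, and ETag values are placeholders.

```ts
import {
  LakeFormationClient,
  StartTransactionCommand,
  UpdateTableObjectsCommand,
  CommitTransactionCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" });

// Adds one Amazon S3 object to a governed table's manifest inside a transaction:
// start a READ_AND_WRITE transaction, write the manifest entry, then commit.
export async function addObjectToGovernedTable() {
  const { TransactionId } = await client.send(
    new StartTransactionCommand({ TransactionType: "READ_AND_WRITE" })
  );

  await client.send(
    new UpdateTableObjectsCommand({
      DatabaseName: "sales_db",       // placeholder database name
      TableName: "orders_governed",   // placeholder governed table name
      TransactionId,
      WriteOperations: [
        {
          AddObject: {
            Uri: "s3://example-bucket/orders/part-0000.parquet", // placeholder object
            ETag: "0123456789abcdef0123456789abcdef",            // placeholder ETag
            Size: 1048576,
          },
        },
      ],
    })
  );

  await client.send(new CommitTransactionCommand({ TransactionId }));
}
```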

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#UpdateTableObjectsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.lakeformation#UpdateTableStorageOptimizer": { + "type": "operation", + "input": { + "target": "com.amazonaws.lakeformation#UpdateTableStorageOptimizerRequest" + }, + "output": { + "target": "com.amazonaws.lakeformation#UpdateTableStorageOptimizerResponse" + }, + "errors": [ + { + "target": "com.amazonaws.lakeformation#AccessDeniedException" + }, + { + "target": "com.amazonaws.lakeformation#EntityNotFoundException" + }, + { + "target": "com.amazonaws.lakeformation#InternalServiceException" + }, + { + "target": "com.amazonaws.lakeformation#InvalidInputException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates the configuration of the storage optimizers for a table.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/UpdateTableStorageOptimizer", + "code": 200 + } + } + }, + "com.amazonaws.lakeformation#UpdateTableStorageOptimizerRequest": { + "type": "structure", + "members": { + "CatalogId": { + "target": "com.amazonaws.lakeformation#CatalogIdString", + "traits": { + "smithy.api#documentation": "

                                                                      The Catalog ID of the table.

                                                                      " + } + }, + "DatabaseName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      Name of the database where the table is present.

                                                                      ", + "smithy.api#required": {} + } + }, + "TableName": { + "target": "com.amazonaws.lakeformation#NameString", + "traits": { + "smithy.api#documentation": "

                                                                      Name of the table for which to enable the storage optimizer.

                                                                      ", + "smithy.api#required": {} + } + }, + "StorageOptimizerConfig": { + "target": "com.amazonaws.lakeformation#StorageOptimizerConfigMap", + "traits": { + "smithy.api#documentation": "

Name of the configuration for the storage optimizer.
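As a companion sketch, UpdateTableStorageOptimizer takes the StorageOptimizerConfigMap defined earlier in this model: a map keyed by optimizer type whose values are string-to-string configuration maps. The "COMPACTION" key and "is_enabled" flag below are assumed illustrative values, not taken from this diff.

```ts
import {
  LakeFormationClient,
  UpdateTableStorageOptimizerCommand,
} from "@aws-sdk/client-lakeformation";

const client = new LakeFormationClient({ region: "us-east-1" });

// Enables an assumed "COMPACTION" optimizer on a governed table. The database
// and table names are placeholders; consult the Lake Formation documentation
// for the supported optimizer types and configuration keys.
export async function enableStorageOptimizer() {
  const { Result } = await client.send(
    new UpdateTableStorageOptimizerCommand({
      DatabaseName: "sales_db",      // placeholder
      TableName: "orders_governed",  // placeholder
      StorageOptimizerConfig: {
        COMPACTION: { is_enabled: "true" },
      },
    })
  );
  console.log(Result);
}
```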

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.lakeformation#UpdateTableStorageOptimizerResponse": { + "type": "structure", + "members": { + "Result": { + "target": "com.amazonaws.lakeformation#Result", + "traits": { + "smithy.api#documentation": "

A response indicating the success or failure of the operation.

                                                                      " + } + } + } + }, + "com.amazonaws.lakeformation#VirtualObject": { + "type": "structure", + "members": { + "Uri": { + "target": "com.amazonaws.lakeformation#URI", + "traits": { + "smithy.api#documentation": "

                                                                      The path to the Amazon S3 object. Must start with s3://

                                                                      ", + "smithy.api#required": {} + } + }, + "ETag": { + "target": "com.amazonaws.lakeformation#ETagString", + "traits": { + "smithy.api#documentation": "

                                                                      The ETag of the Amazon S3 object.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      An object that defines an Amazon S3 object to be deleted if a transaction cancels, provided that \n VirtualPut was called before writing the object.

                                                                      " + } + }, + "com.amazonaws.lakeformation#VirtualObjectList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#VirtualObject" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } + }, + "com.amazonaws.lakeformation#WorkUnitIdLong": { + "type": "long" + }, + "com.amazonaws.lakeformation#WorkUnitRange": { + "type": "structure", + "members": { + "WorkUnitIdMax": { + "target": "com.amazonaws.lakeformation#WorkUnitIdLong", + "traits": { + "smithy.api#documentation": "

                                                                      Defines the maximum work unit ID in the range. The maximum value is inclusive.

                                                                      ", + "smithy.api#required": {} + } + }, + "WorkUnitIdMin": { + "target": "com.amazonaws.lakeformation#WorkUnitIdLong", + "traits": { + "smithy.api#documentation": "

                                                                      Defines the minimum work unit ID in the range.

                                                                      ", + "smithy.api#required": {} + } + }, + "WorkUnitToken": { + "target": "com.amazonaws.lakeformation#WorkUnitTokenString", + "traits": { + "smithy.api#documentation": "

                                                                      A work token used to query the execution service.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Defines the valid range of work unit IDs for querying the execution service.

                                                                      " + } + }, + "com.amazonaws.lakeformation#WorkUnitRangeList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#WorkUnitRange" + } + }, + "com.amazonaws.lakeformation#WorkUnitTokenString": { + "type": "string" + }, + "com.amazonaws.lakeformation#WorkUnitsNotReadyYetException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.lakeformation#MessageString", + "traits": { + "smithy.api#documentation": "

                                                                      A message describing the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Contains details about an error related to work units not being ready.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 420 + } + }, + "com.amazonaws.lakeformation#WriteOperation": { + "type": "structure", + "members": { + "AddObject": { + "target": "com.amazonaws.lakeformation#AddObjectInput", + "traits": { + "smithy.api#documentation": "

                                                                      A new object to add to the governed table.

                                                                      " + } + }, + "DeleteObject": { + "target": "com.amazonaws.lakeformation#DeleteObjectInput", + "traits": { + "smithy.api#documentation": "

                                                                      An object to delete from the governed table.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Defines an object to add to or delete from a governed table.

                                                                      " + } + }, + "com.amazonaws.lakeformation#WriteOperationList": { + "type": "list", + "member": { + "target": "com.amazonaws.lakeformation#WriteOperation" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 100 + } + } } } } diff --git a/codegen/sdk-codegen/aws-models/outposts.json b/codegen/sdk-codegen/aws-models/outposts.json index 1b363073af8b..b179c592e390 100644 --- a/codegen/sdk-codegen/aws-models/outposts.json +++ b/codegen/sdk-codegen/aws-models/outposts.json @@ -599,6 +599,12 @@ "traits": { "smithy.api#documentation": "

                                                                      The tags to apply to the Outpost.

                                                                      " } + }, + "SupportedHardwareType": { + "target": "com.amazonaws.outposts#SupportedHardwareType", + "traits": { + "smithy.api#documentation": "

                                                                      \n The type of hardware for this Outpost.\n

                                                                      " + } } } }, @@ -2125,6 +2131,12 @@ }, "SiteArn": { "target": "com.amazonaws.outposts#SiteArn" + }, + "SupportedHardwareType": { + "target": "com.amazonaws.outposts#SupportedHardwareType", + "traits": { + "smithy.api#documentation": "

                                                                      \n The hardware type. \n

                                                                      " + } } }, "traits": { @@ -2640,6 +2652,21 @@ "smithy.api#pattern": "^[\\S \\n]+$" } }, + "com.amazonaws.outposts#SupportedHardwareType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "RACK", + "name": "RACK" + }, + { + "value": "SERVER", + "name": "SERVER" + } + ] + } + }, "com.amazonaws.outposts#SupportedStorageEnum": { "type": "string", "traits": { diff --git a/codegen/sdk-codegen/aws-models/rbin.json b/codegen/sdk-codegen/aws-models/rbin.json new file mode 100644 index 000000000000..9c6f1bff52c1 --- /dev/null +++ b/codegen/sdk-codegen/aws-models/rbin.json @@ -0,0 +1,1014 @@ +{ + "smithy": "1.0", + "metadata": { + "suppressions": [ + { + "id": "HttpMethodSemantics", + "namespace": "*" + }, + { + "id": "HttpResponseCodeSemantics", + "namespace": "*" + }, + { + "id": "PaginatedTrait", + "namespace": "*" + }, + { + "id": "HttpHeaderTrait", + "namespace": "*" + }, + { + "id": "HttpUriConflict", + "namespace": "*" + }, + { + "id": "Service", + "namespace": "*" + } + ] + }, + "shapes": { + "com.amazonaws.rbin#AmazonRecycleBin": { + "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "rbin", + "arnNamespace": "rbin", + "cloudFormationName": "Rbin", + "cloudTrailEventSource": "rbin.amazonaws.com", + "endpointPrefix": "rbin" + }, + "aws.auth#sigv4": { + "name": "rbin" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

                                                                      This is the Recycle Bin API Reference. This documentation provides \n descriptions and syntax for each of the actions and data types in Recycle Bin.

                                                                      \n \n

                                                                      Recycle Bin is a snapshot recovery feature that enables you to restore accidentally \n deleted snapshots. When using Recycle Bin, if your snapshots are deleted, they are retained \n in the Recycle Bin for a time period that you specify.

                                                                      \n \n

                                                                      You can restore a snapshot from the Recycle Bin at any time before its retention period \n expires. After you restore a snapshot from the Recycle Bin, the snapshot is removed from the \n Recycle Bin, and you can then use it in the same way you use any other snapshot in your \n account. If the retention period expires and the snapshot is not restored, the snapshot is \n permanently deleted from the Recycle Bin and is no longer available for recovery. For more \n information about Recycle Bin, see \n Recycle Bin in the Amazon EC2 User Guide.

                                                                      ", + "smithy.api#title": "Amazon Recycle Bin" + }, + "version": "2021-06-15", + "operations": [ + { + "target": "com.amazonaws.rbin#CreateRule" + }, + { + "target": "com.amazonaws.rbin#DeleteRule" + }, + { + "target": "com.amazonaws.rbin#GetRule" + }, + { + "target": "com.amazonaws.rbin#ListRules" + }, + { + "target": "com.amazonaws.rbin#ListTagsForResource" + }, + { + "target": "com.amazonaws.rbin#TagResource" + }, + { + "target": "com.amazonaws.rbin#UntagResource" + }, + { + "target": "com.amazonaws.rbin#UpdateRule" + } + ] + }, + "com.amazonaws.rbin#CreateRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.rbin#CreateRuleRequest" + }, + "output": { + "target": "com.amazonaws.rbin#CreateRuleResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rbin#InternalServerException" + }, + { + "target": "com.amazonaws.rbin#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.rbin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a Recycle Bin retention rule. For more information, see \n Create Recycle Bin retention rules in the Amazon EC2 User Guide.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/rules", + "code": 201 + } + } + }, + "com.amazonaws.rbin#CreateRuleRequest": { + "type": "structure", + "members": { + "RetentionPeriod": { + "target": "com.amazonaws.rbin#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the retention period for which the retention rule is to retain resources.

                                                                      ", + "smithy.api#required": {} + } + }, + "Description": { + "target": "com.amazonaws.rbin#Description", + "traits": { + "smithy.api#documentation": "

                                                                      A brief description for the retention rule.

                                                                      " + } + }, + "Tags": { + "target": "com.amazonaws.rbin#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the tags to assign to the retention rule.

                                                                      " + } + }, + "ResourceType": { + "target": "com.amazonaws.rbin#ResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots are \n supported.

                                                                      ", + "smithy.api#required": {} + } + }, + "ResourceTags": { + "target": "com.amazonaws.rbin#ResourceTags", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the resource tags to use to identify resources that are to be retained \n by the retention rule. The retention rule retains only deleted snapshots that have one or more \n of the specified tag key and value pairs. If a snapshot is deleted, but it does not have \n any of the specified tag key and value pairs, it is immediately deleted without being retained \n by the retention rule.

                                                                      \n

You can add the same tag key and value pair to a maximum of five retention rules.
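Assuming this update also generates an @aws-sdk/client-rbin package from the model above (as it does for the other new services in this change), a retention rule for deleted EBS snapshots could be created roughly as follows; the tag key, tag value, and retention period are placeholders.

```ts
import { RbinClient, CreateRuleCommand } from "@aws-sdk/client-rbin";

const client = new RbinClient({ region: "us-east-1" });

// Retain deleted EBS snapshots tagged team=analytics for 7 days.
// ResourceType currently supports only EBS_SNAPSHOT per the model.
export async function createRetentionRule() {
  const rule = await client.send(
    new CreateRuleCommand({
      RetentionPeriod: { RetentionPeriodValue: 7, RetentionPeriodUnit: "DAYS" },
      ResourceType: "EBS_SNAPSHOT",
      Description: "Keep deleted analytics snapshots for a week",
      ResourceTags: [{ ResourceTagKey: "team", ResourceTagValue: "analytics" }],
    })
  );
  console.log(rule.Identifier, rule.Status);
}
```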

                                                                      " + } + } + } + }, + "com.amazonaws.rbin#CreateRuleResponse": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.rbin#RuleIdentifier", + "traits": { + "smithy.api#documentation": "

                                                                      The unique identifier of the retention rule.

                                                                      " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.rbin#RetentionPeriod" + }, + "Description": { + "target": "com.amazonaws.rbin#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The retention rule description.

                                                                      " + } + }, + "Tags": { + "target": "com.amazonaws.rbin#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      The tags assigned to the retention rule.

                                                                      " + } + }, + "ResourceType": { + "target": "com.amazonaws.rbin#ResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The resource type retained by the retention rule.

                                                                      " + } + }, + "ResourceTags": { + "target": "com.amazonaws.rbin#ResourceTags", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the resource tags used to identify resources that are retained by the retention \n rule.

                                                                      " + } + }, + "Status": { + "target": "com.amazonaws.rbin#RuleStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The state of the retention rule. Only retention rules that are in the available state retain snapshots.

                                                                      " + } + } + } + }, + "com.amazonaws.rbin#DeleteRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.rbin#DeleteRuleRequest" + }, + "output": { + "target": "com.amazonaws.rbin#DeleteRuleResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rbin#InternalServerException" + }, + { + "target": "com.amazonaws.rbin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rbin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a Recycle Bin retention rule. For more information, see \n Delete Recycle Bin retention rules in the Amazon EC2 User Guide.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/rules/{Identifier}", + "code": 204 + } + } + }, + "com.amazonaws.rbin#DeleteRuleRequest": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.rbin#RuleIdentifier", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of the retention rule to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rbin#DeleteRuleResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.rbin#Description": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\S ]{0,255}$" + } + }, + "com.amazonaws.rbin#ErrorMessage": { + "type": "string" + }, + "com.amazonaws.rbin#GetRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.rbin#GetRuleRequest" + }, + "output": { + "target": "com.amazonaws.rbin#GetRuleResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rbin#InternalServerException" + }, + { + "target": "com.amazonaws.rbin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rbin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets information about a Recycle Bin retention rule.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/rules/{Identifier}", + "code": 200 + } + } + }, + "com.amazonaws.rbin#GetRuleRequest": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.rbin#RuleIdentifier", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of the retention rule.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rbin#GetRuleResponse": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.rbin#RuleIdentifier", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of the retention rule.

                                                                      " + } + }, + "Description": { + "target": "com.amazonaws.rbin#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description assigned to the retention rule.

                                                                      " + } + }, + "ResourceType": { + "target": "com.amazonaws.rbin#ResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The resource type retained by the retention rule. Currently, only Amazon EBS snapshots are supported.

                                                                      " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.rbin#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the period for which the retention rule retains resources.

                                                                      " + } + }, + "ResourceTags": { + "target": "com.amazonaws.rbin#ResourceTags", + "traits": { + "smithy.api#documentation": "

                                                                      The resource tags used to identify resources that are to be retained by the retention rule.

                                                                      " + } + }, + "Status": { + "target": "com.amazonaws.rbin#RuleStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The state of the retention rule. Only retention rules that are in the available state retain snapshots.

                                                                      " + } + } + } + }, + "com.amazonaws.rbin#InternalServerException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.rbin#ErrorMessage" + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The service could not respond to the request due to an internal problem.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.rbin#ListRules": { + "type": "operation", + "input": { + "target": "com.amazonaws.rbin#ListRulesRequest" + }, + "output": { + "target": "com.amazonaws.rbin#ListRulesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rbin#InternalServerException" + }, + { + "target": "com.amazonaws.rbin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Lists the Recycle Bin retention rules in the Region.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/list-rules", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.rbin#ListRulesRequest": { + "type": "structure", + "members": { + "MaxResults": { + "target": "com.amazonaws.rbin#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return for the request in a single page. The remaining results can be seen by sending another request with the returned nextToken value. This value can be between 5 and 500. If maxResults is given a larger value than 500, you receive an error.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.rbin#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use to retrieve the next page of results.

                                                                      " + } + }, + "ResourceType": { + "target": "com.amazonaws.rbin#ResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The resource type retained by the retention rule. Only retention rules that retain the specified resource type \n are listed.

                                                                      ", + "smithy.api#required": {} + } + }, + "ResourceTags": { + "target": "com.amazonaws.rbin#ResourceTags", + "traits": { + "smithy.api#documentation": "

                                                                      The tags used to identify resources that are to be retained by the retention rule.

                                                                      " + } + } + } + }, + "com.amazonaws.rbin#ListRulesResponse": { + "type": "structure", + "members": { + "Rules": { + "target": "com.amazonaws.rbin#RuleSummaryList", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the retention rules.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.rbin#NextToken", + "traits": { + "smithy.api#documentation": "

                                                                      The token to use to retrieve the next page of results. This value is null when there are no more results to return.

                                                                      " + } + } + } + }, + "com.amazonaws.rbin#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.rbin#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.rbin#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rbin#InternalServerException" + }, + { + "target": "com.amazonaws.rbin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rbin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Lists the tags assigned to a specific resource.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{ResourceArn}", + "code": 200 + } + } + }, + "com.amazonaws.rbin#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.rbin#RuleArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource for which to list the tags.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rbin#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "Tags": { + "target": "com.amazonaws.rbin#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the tags assigned to the resource.

                                                                      " + } + } + } + }, + "com.amazonaws.rbin#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 1000 + } + } + }, + "com.amazonaws.rbin#NextToken": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[A-Za-z0-9+/=]{1,2048}$" + } + }, + "com.amazonaws.rbin#ResourceNotFoundException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.rbin#ErrorMessage" + }, + "Reason": { + "target": "com.amazonaws.rbin#ResourceNotFoundExceptionReason", + "traits": { + "smithy.api#documentation": "

                                                                      The reason for the exception.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The specified resource was not found.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.rbin#ResourceNotFoundExceptionReason": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "RULE_NOT_FOUND", + "name": "RULE_NOT_FOUND" + } + ] + } + }, + "com.amazonaws.rbin#ResourceTag": { + "type": "structure", + "members": { + "ResourceTagKey": { + "target": "com.amazonaws.rbin#ResourceTagKey", + "traits": { + "smithy.api#documentation": "

                                                                      The tag key.

                                                                      ", + "smithy.api#required": {} + } + }, + "ResourceTagValue": { + "target": "com.amazonaws.rbin#ResourceTagValue", + "traits": { + "smithy.api#documentation": "

                                                                      The tag value.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about a resource tag used to identify resources that are to be retained by a Recycle Bin retention rule.

                                                                      " + } + }, + "com.amazonaws.rbin#ResourceTagKey": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\S\\s]{1,128}$" + } + }, + "com.amazonaws.rbin#ResourceTagValue": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[\\S\\s]{0,256}$" + } + }, + "com.amazonaws.rbin#ResourceTags": { + "type": "list", + "member": { + "target": "com.amazonaws.rbin#ResourceTag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.rbin#ResourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "EBS_SNAPSHOT", + "name": "EBS_SNAPSHOT" + } + ] + } + }, + "com.amazonaws.rbin#RetentionPeriod": { + "type": "structure", + "members": { + "RetentionPeriodValue": { + "target": "com.amazonaws.rbin#RetentionPeriodValue", + "traits": { + "smithy.api#documentation": "

                                                                      The period value for which the retention rule is to retain resources. The period is measured using \n the unit specified for RetentionPeriodUnit.

                                                                      ", + "smithy.api#required": {} + } + }, + "RetentionPeriodUnit": { + "target": "com.amazonaws.rbin#RetentionPeriodUnit", + "traits": { + "smithy.api#documentation": "

                                                                      The unit of time in which the retention period is measured. Currently, only DAYS \n is supported.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about the retention period for which a retention rule is to retain resources.

                                                                      " + } + }, + "com.amazonaws.rbin#RetentionPeriodUnit": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "DAYS", + "name": "DAYS" + } + ] + } + }, + "com.amazonaws.rbin#RetentionPeriodValue": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1, + "max": 3650 + } + } + }, + "com.amazonaws.rbin#RuleArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 1011 + }, + "smithy.api#pattern": "^arn:aws(-[a-z]{1,3}){0,2}:rbin:[a-z\\-0-9]{0,63}:[0-9]{12}:rule/[0-9a-zA-Z]{11}{0,1011}$" + } + }, + "com.amazonaws.rbin#RuleIdentifier": { + "type": "string", + "traits": { + "smithy.api#pattern": "^[0-9a-zA-Z]{11}$" + } + }, + "com.amazonaws.rbin#RuleStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "pending", + "name": "PENDING" + }, + { + "value": "available", + "name": "AVAILABLE" + } + ] + } + }, + "com.amazonaws.rbin#RuleSummary": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.rbin#RuleIdentifier", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of the retention rule.

                                                                      " + } + }, + "Description": { + "target": "com.amazonaws.rbin#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The description for the retention rule.

                                                                      " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.rbin#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

Information about the retention period for which the retention rule retains resources.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about a Recycle Bin retention rule.

                                                                      " + } + }, + "com.amazonaws.rbin#RuleSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.rbin#RuleSummary" + } + }, + "com.amazonaws.rbin#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.rbin#ErrorMessage" + }, + "Reason": { + "target": "com.amazonaws.rbin#ServiceQuotaExceededExceptionReason", + "traits": { + "smithy.api#documentation": "

                                                                      The reason for the exception.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request would cause a service quota for the number of tags per resource to be exceeded.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.rbin#ServiceQuotaExceededExceptionReason": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SERVICE_QUOTA_EXCEEDED", + "name": "SERVICE_QUOTA_EXCEEDED" + } + ] + } + }, + "com.amazonaws.rbin#Tag": { + "type": "structure", + "members": { + "Key": { + "target": "com.amazonaws.rbin#TagKey", + "traits": { + "smithy.api#documentation": "

                                                                      The tag key.

                                                                      ", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.rbin#TagValue", + "traits": { + "smithy.api#documentation": "

                                                                      The tag value.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Information about the tags assigned to a Recycle Bin retention rule.

                                                                      " + } + }, + "com.amazonaws.rbin#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, + "com.amazonaws.rbin#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.rbin#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.rbin#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.rbin#Tag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.rbin#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.rbin#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.rbin#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rbin#InternalServerException" + }, + { + "target": "com.amazonaws.rbin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rbin#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.rbin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Assigns tags to the specified resource.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/tags/{ResourceArn}", + "code": 201 + } + } + }, + "com.amazonaws.rbin#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.rbin#RuleArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource to which to assign the tags.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.rbin#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the tags to assign to the resource.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rbin#TagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.rbin#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, + "com.amazonaws.rbin#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.rbin#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.rbin#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rbin#InternalServerException" + }, + { + "target": "com.amazonaws.rbin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rbin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Unassigns a tag from a resource.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{ResourceArn}", + "code": 204 + } + } + }, + "com.amazonaws.rbin#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.rbin#RuleArn", + "traits": { + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) of the resource from which to unassign the tags.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.rbin#TagKeyList", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the tags to unassign from the resource.

                                                                      ", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rbin#UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.rbin#UpdateRule": { + "type": "operation", + "input": { + "target": "com.amazonaws.rbin#UpdateRuleRequest" + }, + "output": { + "target": "com.amazonaws.rbin#UpdateRuleResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rbin#InternalServerException" + }, + { + "target": "com.amazonaws.rbin#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rbin#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Updates an existing Recycle Bin retention rule. For more information, see \n Update Recycle Bin retention rules in the Amazon EC2 User Guide.

                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/rules/{Identifier}", + "code": 200 + } + } + }, + "com.amazonaws.rbin#UpdateRuleRequest": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.rbin#RuleIdentifier", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of the retention rule to update.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.rbin#RetentionPeriod", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the retention period for which the retention rule is to retain resources.

                                                                      " + } + }, + "Description": { + "target": "com.amazonaws.rbin#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The retention rule description.

                                                                      " + } + }, + "ResourceType": { + "target": "com.amazonaws.rbin#ResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The resource type to be retained by the retention rule. Currently, only Amazon EBS snapshots are supported.

                                                                      " + } + }, + "ResourceTags": { + "target": "com.amazonaws.rbin#ResourceTags", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the resource tags to use to identify resources that are to be retained \n by the retention rule. The retention rule retains only deleted snapshots that have one or more \n of the specified tag key and value pairs. If a snapshot is deleted, but it does not have \n any of the specified tag key and value pairs, it is immediately deleted without being retained \n by the retention rule.

                                                                      \n

                                                                      You can add the same tag key and value pair to a maximum of five retention rules.

                                                                      " + } + } + } + }, + "com.amazonaws.rbin#UpdateRuleResponse": { + "type": "structure", + "members": { + "Identifier": { + "target": "com.amazonaws.rbin#RuleIdentifier", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of the retention rule.

                                                                      " + } + }, + "RetentionPeriod": { + "target": "com.amazonaws.rbin#RetentionPeriod" + }, + "Description": { + "target": "com.amazonaws.rbin#Description", + "traits": { + "smithy.api#documentation": "

                                                                      The retention rule description.

                                                                      " + } + }, + "ResourceType": { + "target": "com.amazonaws.rbin#ResourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The resource type retained by the retention rule.

                                                                      " + } + }, + "ResourceTags": { + "target": "com.amazonaws.rbin#ResourceTags", + "traits": { + "smithy.api#documentation": "

                                                                      Information about the resource tags used to identify resources that are retained by the retention \n rule.

                                                                      " + } + }, + "Status": { + "target": "com.amazonaws.rbin#RuleStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The state of the retention rule. Only retention rules that are in the available state retain snapshots.

                                                                      " + } + } + } + }, + "com.amazonaws.rbin#ValidationException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.rbin#ErrorMessage" + }, + "Reason": { + "target": "com.amazonaws.rbin#ValidationExceptionReason", + "traits": { + "smithy.api#documentation": "

                                                                      The reason for the exception.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      One or more of the parameters in the request is not valid.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.rbin#ValidationExceptionReason": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "INVALID_PAGE_TOKEN", + "name": "INVALID_PAGE_TOKEN" + }, + { + "value": "INVALID_PARAMETER_VALUE", + "name": "INVALID_PARAMETER_VALUE" + } + ] + } + } + } +} diff --git a/codegen/sdk-codegen/aws-models/redshift-data.json b/codegen/sdk-codegen/aws-models/redshift-data.json index b2eb4100650f..456b543ae9cd 100644 --- a/codegen/sdk-codegen/aws-models/redshift-data.json +++ b/codegen/sdk-codegen/aws-models/redshift-data.json @@ -34,7 +34,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Runs one or more SQL statements, which can be data manipulation language (DML) or data definition\n language (DDL). \n Depending on the authorization\n method, use one of the following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the \n cluster identifier that matches the cluster in the secret.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - specify the cluster identifier, the database name, and the\n database user name. Permission to call the redshift:GetClusterCredentials\n operation is required to use this method.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      Runs one or more SQL statements, which can be data manipulation language (DML) or data definition\n language (DDL). \n Depending on the authorization\n method, use one of the following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.\nWhen connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. \nAlso, permission to call the redshift:GetClusterCredentials operation is required.\nWhen connecting to a serverless endpoint, specify the database name.

                                                                        \n
                                                                      • \n
                                                                      " } }, "com.amazonaws.redshiftdata#BatchExecuteStatementException": { @@ -73,8 +73,7 @@ "ClusterIdentifier": { "target": "com.amazonaws.redshiftdata#Location", "traits": { - "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

                                                                      ", - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.

                                                                      " } }, "SecretArn": { @@ -86,7 +85,7 @@ "DbUser": { "target": "com.amazonaws.redshiftdata#String", "traits": { - "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when authenticating using temporary credentials.

                                                                      " + "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

                                                                      " } }, "Database": { @@ -128,7 +127,7 @@ "ClusterIdentifier": { "target": "com.amazonaws.redshiftdata#Location", "traits": { - "smithy.api#documentation": "

                                                                      The cluster identifier.

                                                                      " + "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is not returned when connecting to a serverless endpoint.

                                                                      " } }, "DbUser": { @@ -181,6 +180,9 @@ "target": "com.amazonaws.redshiftdata#CancelStatementResponse" }, "errors": [ + { + "target": "com.amazonaws.redshiftdata#DatabaseConnectionException" + }, { "target": "com.amazonaws.redshiftdata#InternalServerException" }, @@ -316,6 +318,22 @@ "target": "com.amazonaws.redshiftdata#ColumnMetadata" } }, + "com.amazonaws.redshiftdata#DatabaseConnectionException": { + "type": "structure", + "members": { + "Message": { + "target": "com.amazonaws.redshiftdata#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Connection to a database failed.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, "com.amazonaws.redshiftdata#DatabaseList": { "type": "list", "member": { @@ -431,7 +449,7 @@ "HasResultSet": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

                                                                      A value that indicates whether the statement has a result set. The result set can be empty.

                                                                      " + "smithy.api#documentation": "

                                                                      A value that indicates whether the statement has a result set. The result set can be empty, and the value is true even for an empty result set.\n The value is true if any substatement returns a result set.

                                                                      " } }, "QueryString": { @@ -481,6 +499,9 @@ "target": "com.amazonaws.redshiftdata#DescribeTableResponse" }, "errors": [ + { + "target": "com.amazonaws.redshiftdata#DatabaseConnectionException" + }, { "target": "com.amazonaws.redshiftdata#InternalServerException" }, @@ -489,7 +510,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Describes the detailed information about a table from metadata in the cluster. The\n information includes its columns. \n A token is returned to page through the column list.\n Depending on the authorization method, use one of the\n following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the \n cluster identifier that matches the cluster in the secret.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - specify the cluster identifier, the database name, and the database\n user name. Permission to call the redshift:GetClusterCredentials operation is\n required to use this method.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#documentation": "

                                                                      Describes the detailed information about a table from metadata in the cluster. The\n information includes its columns. \n A token is returned to page through the column list.\n Depending on the authorization method, use one of the\n following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.\nWhen connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. \nAlso, permission to call the redshift:GetClusterCredentials operation is required.\nWhen connecting to a serverless endpoint, specify the database name.

                                                                        \n
                                                                      • \n
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -505,8 +526,7 @@ "ClusterIdentifier": { "target": "com.amazonaws.redshiftdata#Location", "traits": { - "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

                                                                      ", - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.

                                                                      " } }, "SecretArn": { @@ -518,7 +538,7 @@ "DbUser": { "target": "com.amazonaws.redshiftdata#String", "traits": { - "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when authenticating using temporary credentials.

                                                                      " + "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

                                                                      " } }, "Database": { @@ -603,7 +623,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Runs an SQL statement, which can be data manipulation language (DML) or data definition\n language (DDL). This statement must be a single SQL statement. \n Depending on the authorization\n method, use one of the following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the \n cluster identifier that matches the cluster in the secret.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - specify the cluster identifier, the database name, and the\n database user name. Permission to call the redshift:GetClusterCredentials\n operation is required to use this method.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      Runs an SQL statement, which can be data manipulation language (DML) or data definition\n language (DDL). This statement must be a single SQL statement. \n Depending on the authorization\n method, use one of the following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.\nWhen connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. \nAlso, permission to call the redshift:GetClusterCredentials operation is required.\nWhen connecting to a serverless endpoint, specify the database name.

                                                                        \n
                                                                      • \n
                                                                      " } }, "com.amazonaws.redshiftdata#ExecuteStatementException": { @@ -643,8 +663,7 @@ "ClusterIdentifier": { "target": "com.amazonaws.redshiftdata#Location", "traits": { - "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

                                                                      ", - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.

                                                                      " } }, "SecretArn": { @@ -656,7 +675,7 @@ "DbUser": { "target": "com.amazonaws.redshiftdata#String", "traits": { - "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when authenticating using temporary credentials.

                                                                      " + "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

                                                                      " } }, "Database": { @@ -704,7 +723,7 @@ "ClusterIdentifier": { "target": "com.amazonaws.redshiftdata#Location", "traits": { - "smithy.api#documentation": "

                                                                      The cluster identifier.

                                                                      " + "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is not returned when connecting to a serverless endpoint.

                                                                      " } }, "DbUser": { @@ -883,6 +902,9 @@ "target": "com.amazonaws.redshiftdata#ListDatabasesResponse" }, "errors": [ + { + "target": "com.amazonaws.redshiftdata#DatabaseConnectionException" + }, { "target": "com.amazonaws.redshiftdata#InternalServerException" }, @@ -891,7 +913,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      List the databases in a cluster. \n A token is returned to page through the database list.\n Depending on the authorization method, use one of the\n following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the \n cluster identifier that matches the cluster in the secret.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - specify the cluster identifier, the database name, and the\n database user name. Permission to call the redshift:GetClusterCredentials\n operation is required to use this method.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#documentation": "

                                                                      List the databases in a cluster. \n A token is returned to page through the database list.\n Depending on the authorization method, use one of the\n following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.\nWhen connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. \nAlso, permission to call the redshift:GetClusterCredentials operation is required.\nWhen connecting to a serverless endpoint, specify the database name.

                                                                        \n
                                                                      • \n
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -907,8 +929,7 @@ "ClusterIdentifier": { "target": "com.amazonaws.redshiftdata#Location", "traits": { - "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

                                                                      ", - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.

                                                                      " } }, "Database": { @@ -927,7 +948,7 @@ "DbUser": { "target": "com.amazonaws.redshiftdata#String", "traits": { - "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when authenticating using temporary credentials.

                                                                      " + "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

                                                                      " } }, "NextToken": { @@ -970,6 +991,9 @@ "target": "com.amazonaws.redshiftdata#ListSchemasResponse" }, "errors": [ + { + "target": "com.amazonaws.redshiftdata#DatabaseConnectionException" + }, { "target": "com.amazonaws.redshiftdata#InternalServerException" }, @@ -978,7 +1002,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Lists the schemas in a database. \n A token is returned to page through the schema list. \n Depending on the authorization method, use one of the\n following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the \n cluster identifier that matches the cluster in the secret.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - specify the cluster identifier, the database name, and the\n database user name. Permission to call the redshift:GetClusterCredentials\n operation is required to use this method.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#documentation": "

                                                                      Lists the schemas in a database. \n A token is returned to page through the schema list. \n Depending on the authorization method, use one of the\n following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.\nWhen connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. \nAlso, permission to call the redshift:GetClusterCredentials operation is required.\nWhen connecting to a serverless endpoint, specify the database name.

                                                                        \n
                                                                      • \n
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -994,8 +1018,7 @@ "ClusterIdentifier": { "target": "com.amazonaws.redshiftdata#Location", "traits": { - "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

                                                                      ", - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.

                                                                      " } }, "SecretArn": { @@ -1007,7 +1030,7 @@ "DbUser": { "target": "com.amazonaws.redshiftdata#String", "traits": { - "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when authenticating using temporary credentials.

                                                                      " + "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

                                                                      " } }, "Database": { @@ -1158,6 +1181,9 @@ "target": "com.amazonaws.redshiftdata#ListTablesResponse" }, "errors": [ + { + "target": "com.amazonaws.redshiftdata#DatabaseConnectionException" + }, { "target": "com.amazonaws.redshiftdata#InternalServerException" }, @@ -1166,7 +1192,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      List the tables in a database. If neither SchemaPattern nor TablePattern are specified, then \n all tables in the database are returned.\n A token is returned to page through the table list. \n Depending on the authorization method, use one of the\n following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - specify the Amazon Resource Name (ARN) of the secret, the database name, and the \n cluster identifier that matches the cluster in the secret.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - specify the cluster identifier, the database name, and the\n database user name. Permission to call the redshift:GetClusterCredentials\n operation is required to use this method.

                                                                        \n
                                                                      • \n
                                                                      ", + "smithy.api#documentation": "

                                                                      List the tables in a database. If neither SchemaPattern nor TablePattern are specified, then \n all tables in the database are returned.\n A token is returned to page through the table list. \n Depending on the authorization method, use one of the\n following combinations of request parameters:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Secrets Manager - when connecting to a cluster, specify the Amazon Resource Name (ARN) of the secret, the database name, and the cluster identifier that matches the cluster in the secret.\nWhen connecting to a serverless endpoint, specify the Amazon Resource Name (ARN) of the secret and the database name.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Temporary credentials - when connecting to a cluster, specify the cluster identifier, the database name, and the database user name. \nAlso, permission to call the redshift:GetClusterCredentials operation is required.\nWhen connecting to a serverless endpoint, specify the database name.

                                                                        \n
                                                                      • \n
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -1182,8 +1208,7 @@ "ClusterIdentifier": { "target": "com.amazonaws.redshiftdata#Location", "traits": { - "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when authenticating using either Secrets Manager or temporary credentials.

                                                                      ", - "smithy.api#required": {} + "smithy.api#documentation": "

                                                                      The cluster identifier. This parameter is required when connecting to a cluster and authenticating using either Secrets Manager or temporary credentials.

                                                                      " } }, "SecretArn": { @@ -1195,7 +1220,7 @@ "DbUser": { "target": "com.amazonaws.redshiftdata#String", "traits": { - "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when authenticating using temporary credentials.

                                                                      " + "smithy.api#documentation": "

                                                                      The database user name. This parameter is required when connecting to a cluster and authenticating using temporary credentials.

                                                                      " } }, "Database": { @@ -1653,7 +1678,7 @@ "HasResultSet": { "target": "smithy.api#Boolean", "traits": { - "smithy.api#documentation": "

                                                                      A value that indicates whether the statement has a result set. The result set can be empty.

                                                                      " + "smithy.api#documentation": "

                                                                      A value that indicates whether the statement has a result set. The result set can be empty, and the value is true even for an empty result set.

                                                                      " } } }, diff --git a/codegen/sdk-codegen/aws-models/rum.json b/codegen/sdk-codegen/aws-models/rum.json new file mode 100644 index 000000000000..af1e618f3a23 --- /dev/null +++ b/codegen/sdk-codegen/aws-models/rum.json @@ -0,0 +1,1536 @@ +{ + "smithy": "1.0", + "shapes": { + "com.amazonaws.rum#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      You don't have sufficient permissions to perform this action.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.rum#AppMonitor": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.rum#AppMonitorName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the app monitor.

                                                                      " + } + }, + "Domain": { + "target": "com.amazonaws.rum#AppMonitorDomain", + "traits": { + "smithy.api#documentation": "

                                                                      The top-level internet domain name for which your application has administrative authority.

                                                                      " + } + }, + "Id": { + "target": "com.amazonaws.rum#AppMonitorId", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of this app monitor.

                                                                      " + } + }, + "Created": { + "target": "com.amazonaws.rum#ISOTimestampString", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that this app monitor was created.

                                                                      " + } + }, + "LastModified": { + "target": "com.amazonaws.rum#ISOTimestampString", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time of the most recent changes to this app monitor's configuration.

                                                                      " + } + }, + "Tags": { + "target": "com.amazonaws.rum#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with this app monitor.

                                                                      " + } + }, + "State": { + "target": "com.amazonaws.rum#StateEnum", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of the app monitor.

                                                                      " + } + }, + "AppMonitorConfiguration": { + "target": "com.amazonaws.rum#AppMonitorConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains much of the configuration data for the app monitor.

                                                                      " + } + }, + "DataStorage": { + "target": "com.amazonaws.rum#DataStorage", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about whether this app monitor stores a copy of\n the telemetry data that RUM collects using CloudWatch Logs.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A RUM app monitor collects telemetry data from your application and sends that\n data to RUM. The data includes performance and reliability information such as page load time, client-side errors, \n and user behavior.

                                                                      " + } + }, + "com.amazonaws.rum#AppMonitorConfiguration": { + "type": "structure", + "members": { + "IdentityPoolId": { + "target": "com.amazonaws.rum#IdentityPoolId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the Amazon Cognito identity pool \n that is used to authorize the sending of data to RUM.

                                                                      " + } + }, + "ExcludedPages": { + "target": "com.amazonaws.rum#Pages", + "traits": { + "smithy.api#documentation": "

                                                                      A list of URLs in your website or application to exclude from RUM data collection.

                                                                      \n

                                                                      You can't include both ExcludedPages and IncludedPages in the same operation.

                                                                      " + } + }, + "IncludedPages": { + "target": "com.amazonaws.rum#Pages", + "traits": { + "smithy.api#documentation": "

                                                                      If this app monitor is to collect data from only certain pages in your application, this structure lists those pages.

                                                                      \n \n

                                                                      You can't include both ExcludedPages and IncludedPages in the same operation.

                                                                      " + } + }, + "FavoritePages": { + "target": "com.amazonaws.rum#FavoritePages", + "traits": { + "smithy.api#documentation": "

                                                                      A list of pages in the CloudWatch RUM console that are to be displayed with a \"favorite\" icon.

                                                                      " + } + }, + "SessionSampleRate": { + "target": "com.amazonaws.rum#SessionSampleRate", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies the percentage of user sessions to use for RUM data collection. Choosing a higher percentage gives you\n more data but also incurs more costs.

                                                                      \n

                                                                      The number you specify is the percentage of user sessions that will be used.

                                                                      \n

                                                                      If you omit this parameter, the default of 10 is used.

                                                                      " + } + }, + "GuestRoleArn": { + "target": "com.amazonaws.rum#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the guest IAM role that is attached to the Amazon Cognito identity pool \n that is used to authorize the sending of data to RUM.

                                                                      " + } + }, + "AllowCookies": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      If you set this to true, the RUM web client sets two cookies, a session\n cookie and a user cookie. The cookies allow the RUM web client to collect data relating to\n the number of users an application has and the behavior of the application across a\n sequence of events. Cookies are stored in the top-level domain of the current page.

                                                                      " + } + }, + "Telemetries": { + "target": "com.amazonaws.rum#Telemetries", + "traits": { + "smithy.api#documentation": "

                                                                      An array that lists the types of telemetry data that this app monitor is to collect.

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n errors indicates that RUM collects data about unhandled JavaScript errors raised\n by your application.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n performance indicates that RUM collects performance data about how your application\n and its resources are loaded and rendered. This includes Core Web Vitals.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n http indicates that RUM collects data about HTTP errors thrown by your application.

                                                                        \n
                                                                      • \n
                                                                      " + } + }, + "EnableXRay": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      If you set this to true, RUM enables X-Ray tracing for\n the user sessions that RUM samples. RUM adds an X-Ray trace header to allowed\n HTTP requests. It also records an X-Ray segment for allowed HTTP requests.\n You can see traces and segments from these user sessions in the X-Ray console\n and the CloudWatch ServiceLens console. For more information, see What is X-Ray?\n

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This structure contains much of the configuration data for the app monitor.

                                                                      " + } + }, + "com.amazonaws.rum#AppMonitorDetails": { + "type": "structure", + "members": { + "name": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the app monitor.

                                                                      " + } + }, + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of the app monitor.

                                                                      " + } + }, + "version": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The version of the app monitor.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the RUM app monitor.

                                                                      " + } + }, + "com.amazonaws.rum#AppMonitorDomain": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 253 + }, + "smithy.api#pattern": "^(localhost)|^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$|^(?![-.])([A-Za-z0-9-\\.\\-]{0,63})((?![-])([a-zA-Z0-9]{1}|^[a-zA-Z0-9]{0,1}))\\.(?![-])[A-Za-z-0-9]{1,63}((?![-])([a-zA-Z0-9]{1}|^[a-zA-Z0-9]{0,1}))|^(\\*\\.)(?![-.])([A-Za-z0-9-\\.\\-]{0,63})((?![-])([a-zA-Z0-9]{1}|^[a-zA-Z0-9]{0,1}))\\.(?![-])[A-Za-z-0-9]{1,63}((?![-])([a-zA-Z0-9]{1}|^[a-zA-Z0-9]{0,1}))" + } + }, + "com.amazonaws.rum#AppMonitorId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 36, + "max": 36 + }, + "smithy.api#pattern": "^[a-fA-F0-9]{8}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{4}-[a-fA-F0-9]{12}$" + } + }, + "com.amazonaws.rum#AppMonitorName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^(?!\\.)[\\.\\-_#A-Za-z0-9]+$" + } + }, + "com.amazonaws.rum#AppMonitorResource": { + "type": "resource", + "identifiers": { + "Name": { + "target": "com.amazonaws.rum#AppMonitorName" + } + }, + "read": { + "target": "com.amazonaws.rum#GetAppMonitor" + }, + "update": { + "target": "com.amazonaws.rum#UpdateAppMonitor" + }, + "delete": { + "target": "com.amazonaws.rum#DeleteAppMonitor" + }, + "list": { + "target": "com.amazonaws.rum#ListAppMonitors" + }, + "operations": [ + { + "target": "com.amazonaws.rum#CreateAppMonitor" + }, + { + "target": "com.amazonaws.rum#GetAppMonitorData" + } + ], + "traits": { + "aws.api#arn": { + "template": "appmonitor/{Name}", + "absolute": false, + "noAccount": false, + "noRegion": false + } + } + }, + "com.amazonaws.rum#AppMonitorSummary": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.rum#AppMonitorName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of this app monitor.

                                                                      " + } + }, + "Id": { + "target": "com.amazonaws.rum#AppMonitorId", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of this app monitor.

                                                                      " + } + }, + "Created": { + "target": "com.amazonaws.rum#ISOTimestampString", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time that the app monitor was created.

                                                                      " + } + }, + "LastModified": { + "target": "com.amazonaws.rum#ISOTimestampString", + "traits": { + "smithy.api#documentation": "

                                                                      The date and time of the most recent changes to this app monitor's configuration.

                                                                      " + } + }, + "State": { + "target": "com.amazonaws.rum#StateEnum", + "traits": { + "smithy.api#documentation": "

                                                                      The current state of this app monitor.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that includes some data about app monitors and their settings.

                                                                      " + } + }, + "com.amazonaws.rum#AppMonitorSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.rum#AppMonitorSummary" + } + }, + "com.amazonaws.rum#Arn": { + "type": "string", + "traits": { + "smithy.api#pattern": "arn:[^:]*:[^:]*:[^:]*:[^:]*:.*" + } + }, + "com.amazonaws.rum#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the resource that is associated with the error.

                                                                      ", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the resource that is associated with the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This operation attempted to create a resource that already exists.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.rum#CreateAppMonitor": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#CreateAppMonitorRequest" + }, + "output": { + "target": "com.amazonaws.rum#CreateAppMonitorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#AccessDeniedException" + }, + { + "target": "com.amazonaws.rum#ConflictException" + }, + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.rum#ThrottlingException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates an Amazon CloudWatch RUM app monitor, which collects telemetry data from your application and sends that data to RUM. The data includes performance and reliability information such as page load time, client-side errors, and user behavior.

You use this operation only to create a new app monitor. To update an existing app monitor, use UpdateAppMonitor instead.

After you create an app monitor, sign in to the CloudWatch RUM console to get the JavaScript code snippet to add to your web application. For more information, see How do I find a code snippet that I've already generated?
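A minimal TypeScript sketch of calling this operation through the SDK, assuming this model is published as the generated @aws-sdk/client-rum package with the usual command naming; the region, monitor name, and domain are placeholders.

```ts
import { RUMClient, CreateAppMonitorCommand } from "@aws-sdk/client-rum"; // assumed generated package

const client = new RUMClient({ region: "us-east-1" }); // region is a placeholder

async function createMonitor() {
  // Creates an app monitor for example.com; the response carries the new monitor's unique ID.
  const { Id } = await client.send(
    new CreateAppMonitorCommand({
      Name: "my-web-app",    // placeholder name
      Domain: "example.com", // top-level domain the application is served from
      CwLogEnabled: false,   // default: no copy of telemetry kept in CloudWatch Logs
    })
  );
  return Id;
}
```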

                                                                      ", + "smithy.api#externalDocumentation": { + "API Reference": "https://docs.aws.amazon.com/rum/latest/developerguide/CreateAppMonitor.html" + }, + "smithy.api#http": { + "method": "POST", + "uri": "/appmonitor", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.rum#CreateAppMonitorRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.rum#AppMonitorName", + "traits": { + "smithy.api#documentation": "

                                                                      A name for the app monitor.

                                                                      ", + "smithy.api#required": {} + } + }, + "Domain": { + "target": "com.amazonaws.rum#AppMonitorDomain", + "traits": { + "smithy.api#documentation": "

                                                                      The top-level internet domain name for which your application has administrative authority.

                                                                      ", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.rum#TagMap", + "traits": { + "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the app monitor.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

You can associate as many as 50 tags with an app monitor.

For more information, see Tagging Amazon Web Services resources.

                                                                      " + } + }, + "AppMonitorConfiguration": { + "target": "com.amazonaws.rum#AppMonitorConfiguration", + "traits": { + "smithy.api#documentation": "

A structure that contains much of the configuration data for the app monitor. If you are using Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration, you must set up your own authorization method. For more information, see Authorize your application to send data to Amazon Web Services.

If you omit this argument, the sample rate used for RUM is set to 10% of the user sessions.

                                                                      " + } + }, + "CwLogEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM sends a copy of this telemetry data to Amazon CloudWatch Logs in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur Amazon CloudWatch Logs charges.

If you omit this parameter, the default is false.

                                                                      " + } + } + } + }, + "com.amazonaws.rum#CreateAppMonitorResponse": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.rum#AppMonitorId", + "traits": { + "smithy.api#documentation": "

                                                                      The unique ID of the new app monitor.

                                                                      " + } + } + } + }, + "com.amazonaws.rum#CwLog": { + "type": "structure", + "members": { + "CwLogEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

Indicates whether the app monitor stores copies of the data that RUM collects in CloudWatch Logs.

                                                                      " + } + }, + "CwLogGroup": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the log group where the copies are stored.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains the information about whether the app monitor stores copies of the data\n that RUM collects in CloudWatch Logs. If it does, this structure also contains the name of the log group.

                                                                      " + } + }, + "com.amazonaws.rum#DataStorage": { + "type": "structure", + "members": { + "CwLog": { + "target": "com.amazonaws.rum#CwLog", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains the information about whether the app monitor stores copies of the data\n that RUM collects in CloudWatch Logs. If it does, this structure also contains the name of the log group.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about whether this app monitor stores a copy of\n the telemetry data that RUM collects using CloudWatch Logs.

                                                                      " + } + }, + "com.amazonaws.rum#DeleteAppMonitor": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#DeleteAppMonitorRequest" + }, + "output": { + "target": "com.amazonaws.rum#DeleteAppMonitorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#AccessDeniedException" + }, + { + "target": "com.amazonaws.rum#ConflictException" + }, + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rum#ThrottlingException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes an existing app monitor. This immediately stops the collection of data.
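A short hedged TypeScript sketch of this operation, assuming the generated @aws-sdk/client-rum package; the monitor name is a placeholder.

```ts
import { RUMClient, DeleteAppMonitorCommand } from "@aws-sdk/client-rum"; // assumed generated package

const client = new RUMClient({ region: "us-east-1" });

async function deleteMonitor(name: string): Promise<void> {
  // Deleting an app monitor stops data collection immediately; the response body is empty.
  await client.send(new DeleteAppMonitorCommand({ Name: name }));
}
```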

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/appmonitor/{Name}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.rum#DeleteAppMonitorRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.rum#AppMonitorName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the app monitor to delete.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rum#DeleteAppMonitorResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.rum#EventData": { + "type": "string" + }, + "com.amazonaws.rum#EventDataList": { + "type": "list", + "member": { + "target": "com.amazonaws.rum#EventData" + } + }, + "com.amazonaws.rum#FavoritePages": { + "type": "list", + "member": { + "target": "smithy.api#String" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.rum#GetAppMonitor": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#GetAppMonitorRequest" + }, + "output": { + "target": "com.amazonaws.rum#GetAppMonitorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#AccessDeniedException" + }, + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rum#ThrottlingException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves the complete configuration information for one app monitor.
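A hedged TypeScript sketch of reading back an app monitor, assuming the generated @aws-sdk/client-rum package; the response exposes the AppMonitor member defined by GetAppMonitorResponse.

```ts
import { RUMClient, GetAppMonitorCommand } from "@aws-sdk/client-rum"; // assumed generated package

const client = new RUMClient({ region: "us-east-1" });

async function showAppMonitor(name: string) {
  // Returns the complete AppMonitor structure for the named monitor.
  const { AppMonitor } = await client.send(new GetAppMonitorCommand({ Name: name }));
  console.log(AppMonitor);
}
```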

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/appmonitor/{Name}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.rum#GetAppMonitorData": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#GetAppMonitorDataRequest" + }, + "output": { + "target": "com.amazonaws.rum#GetAppMonitorDataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#AccessDeniedException" + }, + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rum#ThrottlingException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves the raw performance events that RUM has collected from your web application,\n so that you can do your own processing or analysis of this data.
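A hedged TypeScript sketch of pulling raw events for the last hour, assuming the generated @aws-sdk/client-rum package. TimeRange takes epoch timestamps (QueryTimestamp is a long); milliseconds are assumed here, and the monitor name is a placeholder.

```ts
import { RUMClient, GetAppMonitorDataCommand } from "@aws-sdk/client-rum"; // assumed generated package

const client = new RUMClient({ region: "us-east-1" });

async function fetchRecentEvents(name: string) {
  const now = Date.now();
  const { Events, NextToken } = await client.send(
    new GetAppMonitorDataCommand({
      Name: name,
      TimeRange: { After: now - 60 * 60 * 1000, Before: now }, // last hour, epoch ms assumed
      MaxResults: 100, // MaxQueryResults allows at most 100 per page
    })
  );
  // Each event is returned as a JSON string; pass NextToken back in to page through the rest.
  return { events: (Events ?? []).map((e) => JSON.parse(e)), nextToken: NextToken };
}
```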

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/appmonitor/{Name}/data", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "Events", + "pageSize": "MaxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.rum#GetAppMonitorDataRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.rum#AppMonitorName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the app monitor that collected the data that you want to retrieve.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TimeRange": { + "target": "com.amazonaws.rum#TimeRange", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines the time range that you want to retrieve results from.

                                                                      ", + "smithy.api#required": {} + } + }, + "Filters": { + "target": "com.amazonaws.rum#QueryFilters", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that you can use to filter the results to those that match one or\n more sets of key-value pairs that you specify.

                                                                      " + } + }, + "MaxResults": { + "target": "com.amazonaws.rum#MaxQueryResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return in one operation.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.rum#Token", + "traits": { + "smithy.api#documentation": "

                                                                      Use the token returned by the previous operation to request the next page of results.

                                                                      " + } + } + } + }, + "com.amazonaws.rum#GetAppMonitorDataResponse": { + "type": "structure", + "members": { + "Events": { + "target": "com.amazonaws.rum#EventDataList", + "traits": { + "smithy.api#documentation": "

                                                                      The events that RUM collected that match your request.

                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.rum#Token", + "traits": { + "smithy.api#documentation": "

                                                                      A token that you can use in a subsequent operation to retrieve the next set of\n results.

                                                                      " + } + } + } + }, + "com.amazonaws.rum#GetAppMonitorRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.rum#AppMonitorName", + "traits": { + "smithy.api#documentation": "

                                                                      The app monitor to retrieve information for.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rum#GetAppMonitorResponse": { + "type": "structure", + "members": { + "AppMonitor": { + "target": "com.amazonaws.rum#AppMonitor", + "traits": { + "smithy.api#documentation": "

                                                                      A structure containing all the configuration information for the app monitor.

                                                                      " + } + } + } + }, + "com.amazonaws.rum#ISOTimestampString": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 19, + "max": 19 + }, + "smithy.api#pattern": "/d{4}-[01]/d-[0-3]/dT[0-2]/d:[0-5]/d:[0-5]/d/./d+([+-][0-2]/d:[0-5]/d|Z)" + } + }, + "com.amazonaws.rum#IdentityPoolId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 55 + }, + "smithy.api#pattern": "[\\w-]+:[0-9a-f-]+" + } + }, + "com.amazonaws.rum#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

                                                                      The value of a parameter in the request caused an error.

                                                                      ", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Internal service exception.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 500, + "smithy.api#retryable": {} + } + }, + "com.amazonaws.rum#JsonValue": { + "type": "string", + "traits": { + "smithy.api#mediaType": "application/json" + } + }, + "com.amazonaws.rum#ListAppMonitors": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#ListAppMonitorsRequest" + }, + "output": { + "target": "com.amazonaws.rum#ListAppMonitorsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#AccessDeniedException" + }, + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ThrottlingException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Returns a list of the Amazon CloudWatch RUM app monitors in the account.
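Because this operation carries the smithy.api#paginated trait, the generated client should also expose a paginator; a hedged TypeScript sketch, assuming the usual paginateListAppMonitors helper in @aws-sdk/client-rum.

```ts
import { RUMClient, paginateListAppMonitors } from "@aws-sdk/client-rum"; // assumed generated package

const client = new RUMClient({ region: "us-east-1" });

async function listAllMonitorNames(): Promise<string[]> {
  const names: string[] = [];
  // The paginator follows NextToken automatically, per the pagination trait above.
  for await (const page of paginateListAppMonitors({ client }, {})) {
    for (const summary of page.AppMonitorSummaries ?? []) {
      if (summary.Name) names.push(summary.Name);
    }
  }
  return names;
}
```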

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/appmonitors", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "items": "AppMonitorSummaries", + "pageSize": "MaxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.rum#ListAppMonitorsRequest": { + "type": "structure", + "members": { + "MaxResults": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to return in one operation.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + }, + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      Use the token returned by the previous operation to request the next page of results.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + } + } + }, + "com.amazonaws.rum#ListAppMonitorsResponse": { + "type": "structure", + "members": { + "NextToken": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      A token that you can use in a subsequent operation to retrieve the next set of\n results.

                                                                      " + } + }, + "AppMonitorSummaries": { + "target": "com.amazonaws.rum#AppMonitorSummaryList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that contain information about the returned app monitors.

                                                                      " + } + } + } + }, + "com.amazonaws.rum#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.rum#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Displays the tags associated with a CloudWatch RUM resource.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{ResourceArn}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.rum#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.rum#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the resource that you want to see the tags of.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rum#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.rum#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the resource that you are viewing.

                                                                      ", + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.rum#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys and values associated with the resource you specified.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rum#MaxQueryResults": { + "type": "integer", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.rum#Pages": { + "type": "list", + "member": { + "target": "com.amazonaws.rum#Url" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.rum#PutRumEvents": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#PutRumEventsRequest" + }, + "output": { + "target": "com.amazonaws.rum#PutRumEventsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#AccessDeniedException" + }, + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rum#ThrottlingException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Sends telemetry events about your application performance and user behavior to CloudWatch RUM. The code snippet that RUM generates for you to add to your application includes PutRumEvents operations to send this data to RUM.

Each PutRumEvents operation can send a batch of events from one user session.
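Normally the RUM web client sends these events for you; the hedged TypeScript sketch below only illustrates the request shape defined by PutRumEventsRequest, assuming the generated @aws-sdk/client-rum package. The event type string and details payload are made-up examples.

```ts
import { RUMClient, PutRumEventsCommand } from "@aws-sdk/client-rum"; // assumed generated package
import { randomUUID } from "crypto";

const client = new RUMClient({ region: "us-east-1" });

async function sendOneEvent(appMonitorId: string) {
  await client.send(
    new PutRumEventsCommand({
      Id: appMonitorId,      // the 36-character app monitor ID
      BatchId: randomUUID(), // unique identifier for this batch of events
      AppMonitorDetails: { name: "my-web-app", id: appMonitorId, version: "1.0.0" },
      UserDetails: { userId: "rum-generated-user-id", sessionId: randomUUID() },
      RumEvents: [
        {
          id: randomUUID(),
          timestamp: new Date(),
          type: "com.example.custom_event",           // assumed example event type
          details: JSON.stringify({ duration: 1234 }), // assumed example payload
        },
      ],
    })
  );
}
```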

                                                                      ", + "smithy.api#endpoint": { + "hostPrefix": "dataplane." + }, + "smithy.api#http": { + "method": "POST", + "uri": "/appmonitors/{Id}/", + "code": 200 + } + } + }, + "com.amazonaws.rum#PutRumEventsRequest": { + "type": "structure", + "members": { + "Id": { + "target": "com.amazonaws.rum#AppMonitorId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the app monitor that is sending this data.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "BatchId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      A unique identifier for this batch of RUM event data.

                                                                      ", + "smithy.api#required": {} + } + }, + "AppMonitorDetails": { + "target": "com.amazonaws.rum#AppMonitorDetails", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the app monitor that collected this telemetry information.

                                                                      ", + "smithy.api#required": {} + } + }, + "UserDetails": { + "target": "com.amazonaws.rum#UserDetails", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the user session that this batch of events was collected from.

                                                                      ", + "smithy.api#required": {} + } + }, + "RumEvents": { + "target": "com.amazonaws.rum#RumEventList", + "traits": { + "smithy.api#documentation": "

                                                                      An array of structures that contain the telemetry event data.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rum#PutRumEventsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.rum#QueryFilter": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.rum#QueryFilterKey", + "traits": { + "smithy.api#documentation": "

The name of a key to search for. The filter returns only the events that match the Name and Values that you specify.

Valid values for Name are Browser | Device | Country | Page | OS | EventType | Invert

                                                                      " + } + }, + "Values": { + "target": "com.amazonaws.rum#QueryFilterValueList", + "traits": { + "smithy.api#documentation": "

The values of the Name that are to be included in the returned results.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

A structure that defines a key and values that you can use to filter the results. The only performance events that are returned are those that have values matching the ones that you specify in one of your QueryFilter structures.

For example, you could specify Browser as the Name and specify Chrome,Firefox as the Values to return events generated only from those browsers.

Specifying Invert as the Name works as a "not equal to" filter. For example, specify Invert as the Name and specify Chrome as the value to return all events except events from user sessions with the Chrome browser.
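A small TypeScript sketch of Filters values for GetAppMonitorData, mirroring the two examples above; the field names come from the QueryFilter shape in this model.

```ts
// Return only events from Chrome and Firefox sessions.
const onlyChromeAndFirefox = [{ Name: "Browser", Values: ["Chrome", "Firefox"] }];

// Invert acts as a "not equal to" filter: everything except Chrome sessions.
const everythingExceptChrome = [{ Name: "Invert", Values: ["Chrome"] }];
```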

                                                                      " + } + }, + "com.amazonaws.rum#QueryFilterKey": { + "type": "string" + }, + "com.amazonaws.rum#QueryFilterValue": { + "type": "string" + }, + "com.amazonaws.rum#QueryFilterValueList": { + "type": "list", + "member": { + "target": "com.amazonaws.rum#QueryFilterValue" + } + }, + "com.amazonaws.rum#QueryFilters": { + "type": "list", + "member": { + "target": "com.amazonaws.rum#QueryFilter" + } + }, + "com.amazonaws.rum#QueryTimestamp": { + "type": "long" + }, + "com.amazonaws.rum#RUM": { + "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "RUM", + "arnNamespace": "rum", + "cloudFormationName": "RUM", + "cloudTrailEventSource": "rum.amazonaws.com", + "endpointPrefix": "rum" + }, + "aws.auth#sigv4": { + "name": "rum" + }, + "aws.protocols#restJson1": {}, + "smithy.api#cors": { + "additionalAllowedHeaders": [ + "Authorization", + "x-amz-content-sha256", + "X-Amz-Date", + "x-amzn-trace-id", + "X-Amz-Security-Token", + "X-Amz-Targe", + "x-amz-user-agent", + "*", + "content-type", + "x-amzn-platform-id", + "Date" + ], + "additionalExposedHeaders": [ + "date", + "x-amz-apigw-id", + "x-amzn-trace-id", + "x-amzn-errortype", + "x-amzn-requestid", + "x-amzn-errormessage" + ] + }, + "smithy.api#documentation": "

With Amazon CloudWatch RUM, you can perform real-user monitoring to collect client-side data about your web application performance from actual user sessions in real time. The data collected includes page load times, client-side errors, and user behavior. When you view this data, you can see it all aggregated together and also see breakdowns by the browsers and devices that your customers use.

You can use the collected data to quickly identify and debug client-side performance issues. CloudWatch RUM helps you visualize anomalies in your application performance and find relevant debugging data such as error messages, stack traces, and user sessions. You can also use RUM to understand the range of end-user impact including the number of users, geolocations, and browsers used.

                                                                      ", + "smithy.api#title": "CloudWatch RUM" + }, + "version": "2018-05-10", + "operations": [ + { + "target": "com.amazonaws.rum#ListTagsForResource" + }, + { + "target": "com.amazonaws.rum#PutRumEvents" + }, + { + "target": "com.amazonaws.rum#TagResource" + }, + { + "target": "com.amazonaws.rum#UntagResource" + } + ], + "resources": [ + { + "target": "com.amazonaws.rum#AppMonitorResource" + } + ] + }, + "com.amazonaws.rum#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "resourceName": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the resource that is associated with the error.

                                                                      ", + "smithy.api#required": {} + } + }, + "resourceType": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the resource that is associated with the error.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      Resource not found.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.rum#RumEvent": { + "type": "structure", + "members": { + "id": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      A unique ID for this event.

                                                                      ", + "smithy.api#required": {} + } + }, + "timestamp": { + "target": "smithy.api#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The exact time that this event occurred.

                                                                      ", + "smithy.api#required": {} + } + }, + "type": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The JSON schema that denotes the type of event this is, such as a page load or a new session.

                                                                      ", + "smithy.api#required": {} + } + }, + "metadata": { + "target": "com.amazonaws.rum#JsonValue", + "traits": { + "smithy.api#documentation": "

                                                                      Metadata about this event, which contains a JSON serialization of the identity of the user for\n this session. The user information comes from information such as the HTTP user-agent request header\n and document interface.

                                                                      " + } + }, + "details": { + "target": "com.amazonaws.rum#JsonValue", + "traits": { + "smithy.api#documentation": "

                                                                      A string containing details about the event.

                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains the information for one performance event that RUM collects from a user session with your\n application.

                                                                      " + } + }, + "com.amazonaws.rum#RumEventList": { + "type": "list", + "member": { + "target": "com.amazonaws.rum#RumEvent" + } + }, + "com.amazonaws.rum#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      This request exceeds a service quota.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.rum#SessionSampleRate": { + "type": "double", + "traits": { + "smithy.api#range": { + "min": 0, + "max": 1 + } + } + }, + "com.amazonaws.rum#StateEnum": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "CREATED", + "name": "CREATED" + }, + { + "value": "DELETING", + "name": "DELETING" + }, + { + "value": "ACTIVE", + "name": "ACTIVE" + } + ] + } + }, + "com.amazonaws.rum#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^(?!aws:)[a-zA-Z+-=._:/]+$" + } + }, + "com.amazonaws.rum#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.rum#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 50 + } + } + }, + "com.amazonaws.rum#TagMap": { + "type": "map", + "key": { + "target": "com.amazonaws.rum#TagKey" + }, + "value": { + "target": "com.amazonaws.rum#TagValue" + } + }, + "com.amazonaws.rum#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.rum#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Assigns one or more tags (key-value pairs) to the specified CloudWatch RUM resource. Currently, the only resources that can be tagged are app monitors.

Tags can help you organize and categorize your resources. You can also use them to scope user permissions by granting a user permission to access or change only resources with certain tag values.

Tags don't have any semantic meaning to Amazon Web Services and are interpreted strictly as strings of characters.

You can use the TagResource action with a resource that already has tags. If you specify a new tag key for the resource, this tag is appended to the list of tags associated with the resource. If you specify a tag key that is already associated with the resource, the new tag value that you specify replaces the previous value for that tag.

You can associate as many as 50 tags with a resource.

For more information, see Tagging Amazon Web Services resources.
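A hedged TypeScript sketch of tagging an app monitor, assuming the generated @aws-sdk/client-rum package; the ARN and tag values are placeholders.

```ts
import { RUMClient, TagResourceCommand } from "@aws-sdk/client-rum"; // assumed generated package

const client = new RUMClient({ region: "us-east-1" });

async function tagMonitor(resourceArn: string) {
  // Adds or overwrites up to 50 key-value tags on the app monitor identified by its ARN.
  await client.send(
    new TagResourceCommand({
      ResourceArn: resourceArn, // an appmonitor/{Name} ARN, per the resource template above
      Tags: { team: "frontend", stage: "prod" },
    })
  );
}
```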

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/tags/{ResourceArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.rum#TagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.rum#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the CloudWatch RUM resource that you're adding tags to.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Tags": { + "target": "com.amazonaws.rum#TagMap", + "traits": { + "smithy.api#documentation": "

                                                                      The list of key-value pairs to associate with the resource.

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rum#TagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.rum#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + } + } + }, + "com.amazonaws.rum#Telemetries": { + "type": "list", + "member": { + "target": "com.amazonaws.rum#Telemetry" + } + }, + "com.amazonaws.rum#Telemetry": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "errors", + "name": "ERRORS", + "documentation": "Includes JS error event plugin" + }, + { + "value": "performance", + "name": "PERFORMANCE", + "documentation": "Includes navigation, paint, resource and web vital event plugins" + }, + { + "value": "http", + "name": "HTTP", + "documentation": "Includes X-Ray Xhr and X-Ray Fetch plugin" + } + ] + } + }, + "com.amazonaws.rum#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + }, + "serviceCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the service that is associated with the error.

                                                                      " + } + }, + "quotaCode": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the service quota that was exceeded.

                                                                      " + } + }, + "retryAfterSeconds": { + "target": "smithy.api#Integer", + "traits": { + "smithy.api#documentation": "

                                                                      The value of a parameter in the request caused an error.

                                                                      ", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The request was throttled because of quota limits.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 429, + "smithy.api#retryable": { + "throttling": true + } + } + }, + "com.amazonaws.rum#TimeRange": { + "type": "structure", + "members": { + "After": { + "target": "com.amazonaws.rum#QueryTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The beginning of the time range to retrieve performance events from.

                                                                      ", + "smithy.api#required": {} + } + }, + "Before": { + "target": "com.amazonaws.rum#QueryTimestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The end of the time range to retrieve performance events from. If you omit this, the time \n range extends to the time that this operation is performed.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that defines the time range that you want to retrieve results from.

                                                                      " + } + }, + "com.amazonaws.rum#Token": { + "type": "string" + }, + "com.amazonaws.rum#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.rum#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Removes one or more tags from the specified resource.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{ResourceArn}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.rum#UntagResourceRequest": { + "type": "structure", + "members": { + "ResourceArn": { + "target": "com.amazonaws.rum#Arn", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the CloudWatch RUM resource that you're removing tags from.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "TagKeys": { + "target": "com.amazonaws.rum#TagKeyList", + "traits": { + "smithy.api#documentation": "

                                                                      The list of tag keys to remove from the resource.

                                                                      ", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.rum#UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.rum#UpdateAppMonitor": { + "type": "operation", + "input": { + "target": "com.amazonaws.rum#UpdateAppMonitorRequest" + }, + "output": { + "target": "com.amazonaws.rum#UpdateAppMonitorResponse" + }, + "errors": [ + { + "target": "com.amazonaws.rum#AccessDeniedException" + }, + { + "target": "com.amazonaws.rum#ConflictException" + }, + { + "target": "com.amazonaws.rum#InternalServerException" + }, + { + "target": "com.amazonaws.rum#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.rum#ThrottlingException" + }, + { + "target": "com.amazonaws.rum#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Updates the configuration of an existing app monitor. When you use this operation, only the parts of the app monitor configuration that you specify in this operation are changed. For any parameters that you omit, the existing values are kept.

You can't use this operation to change the tags of an existing app monitor. To change the tags of an existing app monitor, use TagResource.

To create a new app monitor, use CreateAppMonitor.

After you update an app monitor, sign in to the CloudWatch RUM console to get the updated JavaScript code snippet to add to your web application. For more information, see How do I find a code snippet that I've already generated?
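A hedged TypeScript sketch of a partial update, assuming the generated @aws-sdk/client-rum package; only the supplied fields change, matching the behavior described above.

```ts
import { RUMClient, UpdateAppMonitorCommand } from "@aws-sdk/client-rum"; // assumed generated package

const client = new RUMClient({ region: "us-east-1" });

async function enableCwLogs(name: string) {
  // Turns on the CloudWatch Logs copy; all other app monitor settings are kept as-is.
  await client.send(new UpdateAppMonitorCommand({ Name: name, CwLogEnabled: true }));
}
```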

                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/appmonitor/{Name}", + "code": 200 + } + } + }, + "com.amazonaws.rum#UpdateAppMonitorRequest": { + "type": "structure", + "members": { + "Name": { + "target": "com.amazonaws.rum#AppMonitorName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the app monitor to update.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "Domain": { + "target": "com.amazonaws.rum#AppMonitorDomain", + "traits": { + "smithy.api#documentation": "

                                                                      The top-level internet domain name for which your application has administrative authority.

                                                                      " + } + }, + "AppMonitorConfiguration": { + "target": "com.amazonaws.rum#AppMonitorConfiguration", + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains much of the configuration data for the app monitor. If you are using \n Amazon Cognito for authorization, you must include this structure in your request, and it must include the ID of the \n Amazon Cognito identity pool to use for authorization. If you don't include AppMonitorConfiguration, you must set up your own \n authorization method. For more information, see \n Authorize your application\n to send data to Amazon Web Services.

                                                                      " + } + }, + "CwLogEnabled": { + "target": "smithy.api#Boolean", + "traits": { + "smithy.api#documentation": "

                                                                      Data collected by RUM is kept by RUM for 30 days and then deleted. This parameter specifies whether RUM \n sends a copy of this telemetry data to Amazon CloudWatch Logs\n in your account. This enables you to keep the telemetry data for more than 30 days, but it does incur\n Amazon CloudWatch Logs charges.

                                                                      " + } + } + } + }, + "com.amazonaws.rum#UpdateAppMonitorResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.rum#Url": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1260 + }, + "smithy.api#pattern": "https?:\\/\\/(www\\.)?[-a-zA-Z0-9@:%._\\+~#=]{1,256}\\.[a-zA-Z0-9()]{1,6}\\b([-a-zA-Z0-9()@:%_\\+.~#?&//=]*)" + } + }, + "com.amazonaws.rum#UserDetails": { + "type": "structure", + "members": { + "userId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the user for this user session. This ID is generated by RUM and does not include any \n personally identifiable information about the user.

                                                                      " + } + }, + "sessionId": { + "target": "smithy.api#String", + "traits": { + "smithy.api#documentation": "

                                                                      The session ID that the performance events are from.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A structure that contains information about the user session that this batch of events was collected from.

                                                                      " + } + }, + "com.amazonaws.rum#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "smithy.api#String", + "traits": { + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      One of the arguments for the request is not valid.

                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + } + } +} diff --git a/codegen/sdk-codegen/aws-models/s3.json b/codegen/sdk-codegen/aws-models/s3.json index 659023fb7de4..52e45df95ac2 100644 --- a/codegen/sdk-codegen/aws-models/s3.json +++ b/codegen/sdk-codegen/aws-models/s3.json @@ -1408,7 +1408,7 @@ } ], "traits": { - "smithy.api#documentation": "

diff --git a/codegen/sdk-codegen/aws-models/s3.json b/codegen/sdk-codegen/aws-models/s3.json
index 659023fb7de4..52e45df95ac2 100644
--- a/codegen/sdk-codegen/aws-models/s3.json
+++ b/codegen/sdk-codegen/aws-models/s3.json
@@ -1408,7 +1408,7 @@
         }
       ],
       "traits": {
-        "smithy.api#documentation": "[Previous revision of the CopyObject documentation string: identical to the replacement added below, except that the Access Control List (ACL)-Specific Request Headers section did not yet mention the bucket owner enforced setting for S3 Object Ownership.]",

+        "smithy.api#documentation": "Creates a copy of an object that is already stored in Amazon S3. You can store individual objects of up to 5 TB in Amazon S3. You create a copy of your object up to 5 GB in size in a single atomic action using this API. However, to copy an object greater than 5 GB, you must use the multipart upload Upload Part - Copy API. For more information, see Copy Object Using the REST Multipart Upload API. All copy requests must be authenticated. Additionally, you must have read access to the source object and write access to the destination bucket. For more information, see REST Authentication. Both the Region that you want to copy the object from and the Region that you want to copy the object to must be enabled for your account. A copy request might return an error when Amazon S3 receives the copy request or while Amazon S3 is copying the files. If the error occurs before the copy action starts, you receive a standard Amazon S3 error. If the error occurs during the copy operation, the error response is embedded in the 200 OK response. This means that a 200 OK response can contain either a success or an error. Design your application to parse the contents of the response and handle it appropriately. If the copy is successful, you receive a response with information about the copied object. If the request is an HTTP 1.1 request, the response is chunk encoded. If it were not, it would not contain the content-length, and you would need to read the entire body. The copy request charge is based on the storage class and Region that you specify for the destination object. For pricing information, see Amazon S3 pricing. Amazon S3 transfer acceleration does not support cross-Region copies. If you request a cross-Region copy using a transfer acceleration endpoint, you get a 400 Bad Request error. For more information, see Transfer Acceleration. Metadata: When copying an object, you can preserve all metadata (default) or specify new metadata. However, the ACL is not preserved and is set to private for the user making the request. To override the default ACL setting, specify a new ACL when generating a copy request. For more information, see Using ACLs. To specify whether you want the object metadata copied from the source object or replaced with metadata provided in the request, you can optionally add the x-amz-metadata-directive header. When you grant permissions, you can use the s3:x-amz-metadata-directive condition key to enforce certain metadata behavior when objects are uploaded. For more information, see Specifying Conditions in a Policy in the Amazon S3 User Guide. For a complete list of Amazon S3-specific condition keys, see Actions, Resources, and Condition Keys for Amazon S3. x-amz-copy-source-if Headers: To only copy an object under certain conditions, such as whether the ETag matches or whether the object was modified before or after a specified date, use the following request parameters: x-amz-copy-source-if-match, x-amz-copy-source-if-none-match, x-amz-copy-source-if-unmodified-since, and x-amz-copy-source-if-modified-since. If both the x-amz-copy-source-if-match and x-amz-copy-source-if-unmodified-since headers are present in the request and evaluate as follows, Amazon S3 returns 200 OK and copies the data: x-amz-copy-source-if-match evaluates to true, and x-amz-copy-source-if-unmodified-since evaluates to false. If both the x-amz-copy-source-if-none-match and x-amz-copy-source-if-modified-since headers are present in the request and evaluate as follows, Amazon S3 returns the 412 Precondition Failed response code: x-amz-copy-source-if-none-match evaluates to false, and x-amz-copy-source-if-modified-since evaluates to true. All headers with the x-amz- prefix, including x-amz-copy-source, must be signed. Server-side encryption: When you perform a CopyObject operation, you can optionally use the appropriate encryption-related headers to encrypt the object using server-side encryption with Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. For more information about server-side encryption, see Using Server-Side Encryption. If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the object. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. Access Control List (ACL)-Specific Request Headers: When copying an object, you can optionally use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API. If the bucket that you're copying objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. Storage Class Options: You can use the CopyObject action to change the storage class of an object that is already stored in Amazon S3 using the StorageClass parameter. For more information, see Storage Classes in the Amazon S3 User Guide. Versioning: By default, x-amz-copy-source identifies the current version of an object to copy. If the current version is a delete marker, Amazon S3 behaves as if the object was deleted. To copy a different version, use the versionId subresource. If you enable versioning on the target bucket, Amazon S3 generates a unique version ID for the object being copied. This version ID is different from the version ID of the source object. Amazon S3 returns the version ID of the copied object in the x-amz-version-id response header. If you do not enable versioning or suspend it on the target bucket, the version ID that Amazon S3 generates is always null. If the source object's storage class is GLACIER, you must restore a copy of this object before you can use it as a source object for the copy operation. For more information, see RestoreObject. The following operations are related to CopyObject. For more information, see Copying Objects.",
         "smithy.api#http": {
           "method": "PUT",
           "uri": "/{Bucket}/{Key+}?x-id=CopyObject",

@@ -1877,7 +1877,7 @@
         }
       ],
       "traits": {
-        "smithy.api#documentation": "[Previous revision of the CreateBucket documentation string: identical to the replacement added below, except that it has no 'Access control lists (ACLs)' heading and no note about the BucketOwnerEnforced value of the x-amz-object-ownership header; its ACL guidance begins 'When creating a bucket using this operation, you can optionally specify the accounts or groups that should be granted specific permissions on the bucket. There are two ways to grant the appropriate permissions using the request headers.'; and its Permissions section reads: If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL of the CreateBucket request is private, only s3:CreateBucket permission is needed. If ObjectLockEnabledForBucket is set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required.]",

+        "smithy.api#documentation": "Creates a new S3 bucket. To create a bucket, you must register with Amazon S3 and have a valid Amazon Web Services Access Key ID to authenticate requests. Anonymous requests are never allowed to create buckets. By creating the bucket, you become the bucket owner. Not every string is an acceptable bucket name. For information about bucket naming restrictions, see Bucket naming rules. If you want to create an Amazon S3 on Outposts bucket, see Create Bucket. By default, the bucket is created in the US East (N. Virginia) Region. You can optionally specify a Region in the request body. You might choose a Region to optimize latency, minimize costs, or address regulatory requirements. For example, if you reside in Europe, you will probably find it advantageous to create buckets in the Europe (Ireland) Region. For more information, see Accessing a bucket. If you send your create bucket request to the s3.amazonaws.com endpoint, the request goes to the us-east-1 Region. Accordingly, the signature calculations in Signature Version 4 must use us-east-1 as the Region, even if the location constraint in the request specifies another Region where the bucket is to be created. If you create a bucket in a Region other than US East (N. Virginia), your application must be able to handle 307 redirect. For more information, see Virtual hosting of buckets. Access control lists (ACLs): When creating a bucket using this operation, you can optionally configure the bucket ACL to specify the accounts or groups that should be granted specific permissions on the bucket. If your CreateBucket request includes the BucketOwnerEnforced value for the x-amz-object-ownership header, your request can either not specify an ACL or specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent ACL expressed in the XML format. For more information, see Controlling object ownership in the Amazon S3 User Guide. There are two ways to grant the appropriate permissions using the request headers. Either specify a canned ACL using the x-amz-acl request header (Amazon S3 supports a set of predefined ACLs, known as canned ACLs; each canned ACL has a predefined set of grantees and permissions; for more information, see Canned ACL), or specify access permissions explicitly using the x-amz-grant-read, x-amz-grant-write, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. These headers map to the set of permissions Amazon S3 supports in an ACL. For more information, see Access control list (ACL) overview. You specify each grantee as a type=value pair, where the type is one of the following: id, if the value specified is the canonical user ID of an Amazon Web Services account; uri, if you are granting permissions to a predefined group; or emailAddress, if the value specified is the email address of an Amazon Web Services account. Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland), and South America (São Paulo). For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference. For example, the following x-amz-grant-read header grants the Amazon Web Services accounts identified by account IDs permissions to read object data and its metadata: x-amz-grant-read: id=\"11112222333\", id=\"444455556666\". You can use either a canned ACL or specify access permissions explicitly. You cannot do both. Permissions: In addition to s3:CreateBucket, the following permissions are required when your CreateBucket request includes specific headers. ACLs: If your CreateBucket request specifies ACL permissions and the ACL is public-read, public-read-write, authenticated-read, or if you specify access permissions explicitly through any other ACL, both s3:CreateBucket and s3:PutBucketAcl permissions are needed. If the ACL of the CreateBucket request is private or doesn't specify any ACLs, only s3:CreateBucket permission is needed. Object Lock: If ObjectLockEnabledForBucket is set to true in your CreateBucket request, s3:PutBucketObjectLockConfiguration and s3:PutBucketVersioning permissions are required. S3 Object Ownership: If your CreateBucket request includes the x-amz-object-ownership header, s3:PutBucketOwnershipControls permission is required. The following operations are related to CreateBucket.",
         "smithy.api#http": {
           "method": "PUT",
           "uri": "/{Bucket}",
@@ -1978,6 +1978,12 @@
             "smithy.api#documentation": "Specifies whether you want S3 Object Lock to be enabled for the new bucket.",
             "smithy.api#httpHeader": "x-amz-bucket-object-lock-enabled"
           }
+        },
+        "ObjectOwnership": {
+          "target": "com.amazonaws.s3#ObjectOwnership",
+          "traits": {
+            "smithy.api#httpHeader": "x-amz-object-ownership"
+          }
         }
       }
     },

@@ -2490,7 +2496,7 @@
       "target": "com.amazonaws.s3#DeleteBucketIntelligentTieringConfigurationRequest"
     },
     "traits": {
-      "smithy.api#documentation": "[Previous revision of the DeleteBucketIntelligentTieringConfiguration documentation string: identical to the replacement added below, except that it described automatic cost savings in two low latency and high throughput access tiers, said that for data that can be accessed asynchronously you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class, and said that objects smaller than 128 KB are not eligible for auto-tiering.]",

                                                                      Deletes the S3 Intelligent-Tiering configuration from the specified bucket.

                                                                      \n

                                                                      The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.

                                                                      \n

                                                                      The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.

                                                                      \n

                                                                      For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.

                                                                      \n

                                                                      Operations related to\n DeleteBucketIntelligentTieringConfiguration include:

                                                                      \n ", "smithy.api#http": { "method": "DELETE", "uri": "/{Bucket}?intelligent-tiering", @@ -3481,10 +3487,47 @@ }, { "value": "s3:Replication:OperationReplicatedAfterThreshold" + }, + { + "value": "s3:ObjectRestore:Delete" + }, + { + "value": "s3:LifecycleTransition" + }, + { + "value": "s3:IntelligentTiering" + }, + { + "value": "s3:ObjectAcl:Put" + }, + { + "value": "s3:LifecycleExpiration:*" + }, + { + "value": "s3:LifecycleExpiration:Delete" + }, + { + "value": "s3:LifecycleExpiration:DeleteMarkerCreated" + }, + { + "value": "s3:ObjectTagging:*" + }, + { + "value": "s3:ObjectTagging:Put" + }, + { + "value": "s3:ObjectTagging:Delete" } ] } }, + "com.amazonaws.s3#EventBridgeConfiguration": { + "type": "structure", + "members": {}, + "traits": { + "smithy.api#documentation": "
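As a usage sketch only (not part of the generated model): the operation above surfaces in the regenerated @aws-sdk/client-s3 client as DeleteBucketIntelligentTieringConfigurationCommand. The bucket name and configuration Id below are placeholders.

```ts
import {
  S3Client,
  DeleteBucketIntelligentTieringConfigurationCommand,
} from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

// Removes one named Intelligent-Tiering configuration from the bucket.
await client.send(
  new DeleteBucketIntelligentTieringConfigurationCommand({
    Bucket: "example-bucket",        // placeholder bucket name
    Id: "example-tiering-config-id", // placeholder configuration Id
  })
);
```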

+    "com.amazonaws.s3#EventBridgeConfiguration": {
+      "type": "structure",
+      "members": {},
+      "traits": {
+        "smithy.api#documentation": "A container for specifying the configuration for Amazon EventBridge."
+      }
+    },

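For illustration, a minimal sketch of turning on the new EventBridge delivery through the regenerated client; the empty EventBridgeConfiguration element is all the model requires, and the bucket name is a placeholder.

```ts
import {
  S3Client,
  PutBucketNotificationConfigurationCommand,
} from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

// An empty EventBridgeConfiguration enables delivery of bucket events
// to Amazon EventBridge.
await client.send(
  new PutBucketNotificationConfigurationCommand({
    Bucket: "example-bucket", // placeholder
    NotificationConfiguration: {
      EventBridgeConfiguration: {},
    },
  })
);
```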
    "com.amazonaws.s3#EventList": {
      "type": "list",
      "member": {
@@ -3700,7 +3743,7 @@
        "target": "com.amazonaws.s3#GetBucketAclOutput"
      },
      "traits": {
-        "smithy.api#documentation": "This implementation of the GET action uses the acl\n subresource to return the access control list (ACL) of a bucket. To use GET to\n return the ACL of the bucket, you must have READ_ACP access to the bucket. If\n READ_ACP permission is granted to the anonymous user, you can return the\n ACL of the bucket without using an authorization header.\nRelated Resources\n ",
+        "smithy.api#documentation": "This implementation of the GET action uses the acl\n subresource to return the access control list (ACL) of a bucket. To use GET to\n return the ACL of the bucket, you must have READ_ACP access to the bucket. If\n READ_ACP permission is granted to the anonymous user, you can return the\n ACL of the bucket without using an authorization header.\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, \n requests to read ACLs are still supported and return the bucket-owner-full-control \n ACL with the owner being the account that created the bucket. For more information, see \n Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.\nRelated Resources\n ",
        "smithy.api#http": {
          "method": "GET",
          "uri": "/{Bucket}?acl",
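A usage sketch for the GetBucketAcl behavior described above, assuming the regenerated @aws-sdk/client-s3 client; the bucket name is a placeholder.

```ts
import { S3Client, GetBucketAclCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

const { Owner, Grants } = await client.send(
  new GetBucketAclCommand({ Bucket: "example-bucket" }) // placeholder bucket
);
// With the bucket owner enforced setting, the call still succeeds and reports
// a bucket-owner-full-control style ACL owned by the bucket-owning account.
console.log(Owner?.ID, Grants?.length);
```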

@@ -3916,7 +3959,7 @@
        "target": "com.amazonaws.s3#GetBucketIntelligentTieringConfigurationOutput"
      },
      "traits": {
-        "smithy.api#documentation": "Gets the S3 Intelligent-Tiering configuration from the specified bucket.\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.\nOperations related to\n GetBucketIntelligentTieringConfiguration include:\n ",
+        "smithy.api#documentation": "Gets the S3 Intelligent-Tiering configuration from the specified bucket.\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.\nOperations related to\n GetBucketIntelligentTieringConfiguration include:\n ",
        "smithy.api#http": {
          "method": "GET",
          "uri": "/{Bucket}?intelligent-tiering&x-id=GetBucketIntelligentTieringConfiguration",
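A corresponding read sketch, again assuming the regenerated client; bucket name and Id are placeholders.

```ts
import {
  S3Client,
  GetBucketIntelligentTieringConfigurationCommand,
} from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

const out = await client.send(
  new GetBucketIntelligentTieringConfigurationCommand({
    Bucket: "example-bucket",        // placeholder
    Id: "example-tiering-config-id", // placeholder
  })
);
// The returned configuration lists its archive tierings, if any.
console.log(out.IntelligentTieringConfiguration?.Tierings);
```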

@@ -4270,7 +4313,7 @@
        "target": "com.amazonaws.s3#GetBucketOwnershipControlsOutput"
      },
      "traits": {
-        "smithy.api#documentation": "Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you\n must have the s3:GetBucketOwnershipControls permission. For more information\n about Amazon S3 permissions, see Specifying\n Permissions in a Policy.\nFor information about Amazon S3 Object Ownership, see Using Object Ownership.\nThe following operations are related to GetBucketOwnershipControls:\n ",
+        "smithy.api#documentation": "Retrieves OwnershipControls for an Amazon S3 bucket. To use this operation, you\n must have the s3:GetBucketOwnershipControls permission. For more information\n about Amazon S3 permissions, see Specifying\n permissions in a policy.\nFor information about Amazon S3 Object Ownership, see Using Object Ownership.\nThe following operations are related to GetBucketOwnershipControls:\n ",
        "smithy.api#http": {
          "method": "GET",
          "uri": "/{Bucket}?ownershipControls",

@@ -4284,7 +4327,7 @@
        "OwnershipControls": {
          "target": "com.amazonaws.s3#OwnershipControls",
          "traits": {
-            "smithy.api#documentation": "The OwnershipControls (BucketOwnerPreferred or ObjectWriter) currently in\n effect for this Amazon S3 bucket.",
+            "smithy.api#documentation": "The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) currently in\n effect for this Amazon S3 bucket.",
            "smithy.api#httpPayload": {}
          }
        }

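A read sketch for the ownership-controls change above, assuming the regenerated client; the bucket name is a placeholder.

```ts
import {
  S3Client,
  GetBucketOwnershipControlsCommand,
} from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

const { OwnershipControls } = await client.send(
  new GetBucketOwnershipControlsCommand({ Bucket: "example-bucket" }) // placeholder
);
// Rules[].ObjectOwnership can now also be "BucketOwnerEnforced".
console.log(OwnershipControls?.Rules?.map((r) => r.ObjectOwnership));
```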
@@ -4725,7 +4768,7 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "Returns the access control list (ACL) of an object. To use this operation, you must have\n READ_ACP access to the object.\nThis action is not supported by Amazon S3 on Outposts.\nVersioning\nBy default, GET returns ACL information about the current version of an object. To\n return ACL information about a different version, use the versionId subresource.\nThe following operations are related to GetObjectAcl:\n ",
+        "smithy.api#documentation": "Returns the access control list (ACL) of an object. To use this operation, you must have\n READ_ACP access to the object.\nThis action is not supported by Amazon S3 on Outposts.\nVersioning\nBy default, GET returns ACL information about the current version of an object. To\n return ACL information about a different version, use the versionId subresource.\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, \n requests to read ACLs are still supported and return the bucket-owner-full-control \n ACL with the owner being the account that created the bucket. For more information, see \n Controlling object ownership and disabling ACLs in the Amazon S3 User Guide.\nThe following operations are related to GetObjectAcl:\n ",
        "smithy.api#http": {
          "method": "GET",
          "uri": "/{Bucket}/{Key+}?acl",
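A usage sketch for the versioned GetObjectAcl call described above, assuming the regenerated client; bucket, key, and version id are placeholders.

```ts
import { S3Client, GetObjectAclCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

// VersionId is optional; omit it to read the ACL of the current version.
const acl = await client.send(
  new GetObjectAclCommand({
    Bucket: "example-bucket",        // placeholder
    Key: "reports/2021/report.csv",  // placeholder
    VersionId: "example-version-id", // placeholder
  })
);
console.log(acl.Grants);
```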

@@ -6985,7 +7028,7 @@
        "target": "com.amazonaws.s3#ListBucketIntelligentTieringConfigurationsOutput"
      },
      "traits": {
-        "smithy.api#documentation": "Lists the S3 Intelligent-Tiering configuration from the specified bucket.\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class.\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.\nOperations related to\n ListBucketIntelligentTieringConfigurations include:\n ",
+        "smithy.api#documentation": "Lists the S3 Intelligent-Tiering configuration from the specified bucket.\nThe S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities.\nThe S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class.\nFor more information, see Storage class for automatically optimizing frequently and infrequently accessed objects.\nOperations related to\n ListBucketIntelligentTieringConfigurations include:\n ",
        "smithy.api#http": {
          "method": "GET",
          "uri": "/{Bucket}?intelligent-tiering&x-id=ListBucketIntelligentTieringConfigurations",
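Because this List operation is token-paginated rather than paginator-backed, a manual loop is the usual pattern; a sketch assuming the regenerated client, with a placeholder bucket name.

```ts
import {
  S3Client,
  ListBucketIntelligentTieringConfigurationsCommand,
} from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

// Walk all Intelligent-Tiering configurations on the bucket, one page at a time.
let token: string | undefined;
do {
  const page = await client.send(
    new ListBucketIntelligentTieringConfigurationsCommand({
      Bucket: "example-bucket", // placeholder
      ContinuationToken: token,
    })
  );
  for (const cfg of page.IntelligentTieringConfigurationList ?? []) {
    console.log(cfg.Id, cfg.Status);
  }
  token = page.IsTruncated ? page.NextContinuationToken : undefined;
} while (token);
```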

@@ -8096,7 +8139,7 @@
        "TargetGrants": {
          "target": "com.amazonaws.s3#TargetGrants",
          "traits": {
-            "smithy.api#documentation": "Container for granting information."
+            "smithy.api#documentation": "Container for granting information.\nBuckets that use the bucket owner enforced setting for Object\n Ownership don't support target grants. For more information, see Permissions for server access log delivery in the\n Amazon S3 User Guide."
          }
        },
        "TargetPrefix": {

@@ -8482,7 +8525,7 @@
        }
      },
      "traits": {
-        "smithy.api#documentation": "Container for the transition rule that describes when noncurrent objects transition to\n the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,\n GLACIER, or DEEP_ARCHIVE storage class. If your bucket is\n versioning-enabled (or versioning is suspended), you can set this action to request that\n Amazon S3 transition noncurrent object versions to the STANDARD_IA,\n ONEZONE_IA, INTELLIGENT_TIERING, GLACIER, or\n DEEP_ARCHIVE storage class at a specific period in the object's\n lifetime."
+        "smithy.api#documentation": "Container for the transition rule that describes when noncurrent objects transition to\n the STANDARD_IA, ONEZONE_IA, INTELLIGENT_TIERING,\n GLACIER_IR, GLACIER, or DEEP_ARCHIVE storage class. If your bucket is\n versioning-enabled (or versioning is suspended), you can set this action to request that\n Amazon S3 transition noncurrent object versions to the STANDARD_IA,\n ONEZONE_IA, INTELLIGENT_TIERING, GLACIER_IR, GLACIER, or\n DEEP_ARCHIVE storage class at a specific period in the object's\n lifetime."
      }
    },

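To illustrate the new GLACIER_IR transition target, a lifecycle sketch assuming the regenerated client; the bucket name, rule ID, and 30-day window are placeholders.

```ts
import {
  S3Client,
  PutBucketLifecycleConfigurationCommand,
} from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

// Transition noncurrent versions to Glacier Instant Retrieval (GLACIER_IR)
// 30 days after they become noncurrent.
await client.send(
  new PutBucketLifecycleConfigurationCommand({
    Bucket: "example-versioned-bucket", // placeholder
    LifecycleConfiguration: {
      Rules: [
        {
          ID: "noncurrent-to-glacier-ir", // placeholder rule name
          Status: "Enabled",
          Filter: { Prefix: "" }, // apply to the whole bucket
          NoncurrentVersionTransitions: [
            { NoncurrentDays: 30, StorageClass: "GLACIER_IR" },
          ],
        },
      ],
    },
  })
);
```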
    "com.amazonaws.s3#NoncurrentVersionTransitionList": {
      "type": "list",
      "member": {
@@ -8525,6 +8568,12 @@
            "smithy.api#xmlFlattened": {},
            "smithy.api#xmlName": "CloudFunctionConfiguration"
          }
+        },
+        "EventBridgeConfiguration": {
+          "target": "com.amazonaws.s3#EventBridgeConfiguration",
+          "traits": {
+            "smithy.api#documentation": "Enables delivery of events to Amazon EventBridge."
+          }
+        }
      },
      "traits": {

@@ -8826,7 +8875,7 @@
    "com.amazonaws.s3#ObjectOwnership": {
      "type": "string",
      "traits": {
-        "smithy.api#documentation": "The container element for object ownership for a bucket's ownership controls.\nBucketOwnerPreferred - Objects uploaded to the bucket change ownership to the bucket\n owner if the objects are uploaded with the bucket-owner-full-control canned\n ACL.\nObjectWriter - The uploading account will own the object if the object is uploaded with\n the bucket-owner-full-control canned ACL.",
+        "smithy.api#documentation": "The container element for object ownership for a bucket's ownership controls.\nBucketOwnerPreferred - Objects uploaded to the bucket change ownership to the bucket\n owner if the objects are uploaded with the bucket-owner-full-control canned\n ACL.\nObjectWriter - The uploading account will own the object if the object is uploaded with\n the bucket-owner-full-control canned ACL.\nBucketOwnerEnforced - Access control lists (ACLs) are disabled and no longer affect permissions. \n The bucket owner automatically owns and has full control over every object in the bucket. The bucket only\n accepts PUT requests that don't specify an ACL or bucket owner full control\n ACLs, such as the bucket-owner-full-control canned\n ACL or an equivalent form of this ACL expressed in the XML format.",
        "smithy.api#enum": [
          {
            "value": "BucketOwnerPreferred",
@@ -8835,6 +8884,10 @@
          {
            "value": "ObjectWriter",
            "name": "ObjectWriter"
+          },
+          {
+            "value": "BucketOwnerEnforced",
+            "name": "BucketOwnerEnforced"
          }
        ]
      }
@@ -8880,6 +8933,10 @@
          {
            "value": "OUTPOSTS",
            "name": "OUTPOSTS"
+          },
+          {
+            "value": "GLACIER_IR",
+            "name": "GLACIER_IR"
          }
        ]
      }

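A sketch covering both enum additions above (BucketOwnerEnforced and the GLACIER_IR storage class), assuming the regenerated client; bucket names, the object key, and body are placeholders.

```ts
import {
  S3Client,
  PutBucketOwnershipControlsCommand,
  PutObjectCommand,
} from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

// Disable ACLs on the bucket by switching Object Ownership to the new
// BucketOwnerEnforced setting.
await client.send(
  new PutBucketOwnershipControlsCommand({
    Bucket: "example-bucket", // placeholder
    OwnershipControls: {
      Rules: [{ ObjectOwnership: "BucketOwnerEnforced" }],
    },
  })
);

// Write an object directly into the new Glacier Instant Retrieval class.
await client.send(
  new PutObjectCommand({
    Bucket: "example-bucket",      // placeholder
    Key: "archive/2021-11-30.log", // placeholder
    Body: "hello",
    StorageClass: "GLACIER_IR",
  })
);
```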
@@ -9323,7 +9380,7 @@
        "target": "com.amazonaws.s3#PutBucketAclRequest"
      },
      "traits": {
-        "smithy.api#documentation": "Sets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set\n the ACL of a bucket, you must have WRITE_ACP permission.\nYou can use one of the following two ways to set a bucket's permissions:\n • Specify the ACL in the request body\n • Specify permissions using request headers\nYou cannot specify access permission using both the body and the request\n headers.\nDepending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.\nAccess Permissions\nYou can set access permissions using one of the following methods:\n • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. Specify the canned ACL name as the\n value of x-amz-acl. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned ACL.\n • Specify access permissions explicitly with the x-amz-grant-read,\n x-amz-grant-read-acp, x-amz-grant-write-acp, and\n x-amz-grant-full-control headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n the x-amz-acl header to set a canned ACL. These parameters map to the\n set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL)\n Overview.\n You specify each grantee as a type=value pair, where the type is one of the\n following:\n • id – if the value specified is the canonical user ID of an Amazon Web Services account\n • uri – if you are granting permissions to a predefined\n group\n • emailAddress – if the value specified is the email address of\n an Amazon Web Services account\n Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:\n • US East (N. Virginia)\n • US West (N. California)\n • US West (Oregon)\n • Asia Pacific (Singapore)\n • Asia Pacific (Sydney)\n • Asia Pacific (Tokyo)\n • Europe (Ireland)\n • South America (São Paulo)\n For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.\n For example, the following x-amz-grant-write header grants create,\n overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and\n two Amazon Web Services accounts identified by their email addresses.\n x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\",\n id=\"111122223333\", id=\"555566667777\"\nYou can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.\nGrantee Values\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:\n • By the person's ID:\n <>ID<><>GranteesEmail<>\n DisplayName is optional and ignored in the request\n • By URI:\n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>\n • By Email address:\n <>Grantees@email.com<>lt;/Grantee>\n The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.\n Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:\n • US East (N. Virginia)\n • US West (N. California)\n • US West (Oregon)\n • Asia Pacific (Singapore)\n • Asia Pacific (Sydney)\n • Asia Pacific (Tokyo)\n • Europe (Ireland)\n • South America (São Paulo)\n For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.\nRelated Resources\n ",
+        "smithy.api#documentation": "Sets the permissions on an existing bucket using access control lists (ACL). For more\n information, see Using ACLs. To set\n the ACL of a bucket, you must have WRITE_ACP permission.\nYou can use one of the following two ways to set a bucket's permissions:\n • Specify the ACL in the request body\n • Specify permissions using request headers\nYou cannot specify access permission using both the body and the request\n headers.\nDepending on your application needs, you may choose to set the ACL on a bucket using\n either the request body or the headers. For example, if you have an existing application\n that updates a bucket ACL using the request body, then you can continue to use that\n approach.\nIf your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. \n You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and \n return the AccessControlListNotSupported error code. Requests to read ACLs are still supported.\n For more information, see Controlling object ownership\n in the Amazon S3 User Guide.\nAccess Permissions\nYou can set access permissions using one of the following methods:\n • Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports\n a set of predefined ACLs, known as canned ACLs. Each canned ACL\n has a predefined set of grantees and permissions. Specify the canned ACL name as the\n value of x-amz-acl. If you use this header, you cannot use other access\n control-specific headers in your request. For more information, see Canned ACL.\n • Specify access permissions explicitly with the x-amz-grant-read,\n x-amz-grant-read-acp, x-amz-grant-write-acp, and\n x-amz-grant-full-control headers. When using these headers, you\n specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who\n will receive the permission. If you use these ACL-specific headers, you cannot use\n the x-amz-acl header to set a canned ACL. These parameters map to the\n set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL)\n Overview.\n You specify each grantee as a type=value pair, where the type is one of the\n following:\n • id – if the value specified is the canonical user ID of an Amazon Web Services account\n • uri – if you are granting permissions to a predefined\n group\n • emailAddress – if the value specified is the email address of\n an Amazon Web Services account\n Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:\n • US East (N. Virginia)\n • US West (N. California)\n • US West (Oregon)\n • Asia Pacific (Singapore)\n • Asia Pacific (Sydney)\n • Asia Pacific (Tokyo)\n • Europe (Ireland)\n • South America (São Paulo)\n For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.\n For example, the following x-amz-grant-write header grants create,\n overwrite, and delete objects permission to LogDelivery group predefined by Amazon S3 and\n two Amazon Web Services accounts identified by their email addresses.\n x-amz-grant-write: uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\",\n id=\"111122223333\", id=\"555566667777\"\nYou can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.\nGrantee Values\nYou can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:\n • By the person's ID:\n <>ID<><>GranteesEmail<>\n DisplayName is optional and ignored in the request\n • By URI:\n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>\n • By Email address:\n <>Grantees@email.com<>lt;/Grantee>\n The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.\n Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:\n • US East (N. Virginia)\n • US West (N. California)\n • US West (Oregon)\n • Asia Pacific (Singapore)\n • Asia Pacific (Sydney)\n • Asia Pacific (Tokyo)\n • Europe (Ireland)\n • South America (São Paulo)\n For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.\nRelated Resources\n ",
        "smithy.api#http": {
          "method": "PUT",
          "uri": "/{Bucket}?acl",
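A brief PutBucketAcl sketch matching the documentation above, assuming the regenerated client; the bucket name is a placeholder.

```ts
import { S3Client, PutBucketAclCommand } from "@aws-sdk/client-s3";

const client = new S3Client({ region: "us-east-1" });

// Apply a canned ACL; alternatively use the x-amz-grant-* style inputs
// (GrantRead, GrantWriteACP, ...) but not both in the same request.
// On buckets with the bucket owner enforced setting this call fails with
// AccessControlListNotSupported.
await client.send(
  new PutBucketAclCommand({
    Bucket: "example-bucket", // placeholder
    ACL: "private",
  })
);
```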

@@ -9567,7 +9624,7 @@
         "target": "com.amazonaws.s3#PutBucketIntelligentTieringConfigurationRequest"
       },
       "traits": {
-        "smithy.api#documentation": "Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in two low latency and high throughput access tiers. For data that can be accessed asynchronously, you can choose to activate automatic archiving capabilities within the S3 Intelligent-Tiering storage class. The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects. Operations related to PutBucketIntelligentTieringConfiguration include: [...]. You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access or Deep Archive Access tier. Special Errors: HTTP 400 Bad Request Error (Code: InvalidArgument; Cause: Invalid Argument), HTTP 400 Bad Request Error (Code: TooManyConfigurations; Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.), HTTP 403 Forbidden Error (Code: AccessDenied; Cause: You are not the owner of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration on the bucket.).",
+        "smithy.api#documentation": "Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can have up to 1,000 S3 Intelligent-Tiering configurations per bucket. The S3 Intelligent-Tiering storage class is designed to optimize storage costs by automatically moving data to the most cost-effective storage access tier, without performance impact or operational overhead. S3 Intelligent-Tiering delivers automatic cost savings in three low latency and high throughput access tiers. To get the lowest storage cost on data that can be accessed in minutes to hours, you can choose to activate additional archiving capabilities. The S3 Intelligent-Tiering storage class is the ideal storage class for data with unknown, changing, or unpredictable access patterns, independent of object size or retention period. If the size of an object is less than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller objects can be stored, but they are always charged at the Frequent Access tier rates in the S3 Intelligent-Tiering storage class. For more information, see Storage class for automatically optimizing frequently and infrequently accessed objects. Operations related to PutBucketIntelligentTieringConfiguration include: [...]. You only need S3 Intelligent-Tiering enabled on a bucket if you want to automatically move objects stored in the S3 Intelligent-Tiering storage class to the Archive Access or Deep Archive Access tier. Special Errors: HTTP 400 Bad Request Error (Code: InvalidArgument; Cause: Invalid Argument), HTTP 400 Bad Request Error (Code: TooManyConfigurations; Cause: You are attempting to create a new configuration but have already reached the 1,000-configuration limit.), HTTP 403 Forbidden Error (Code: AccessDenied; Cause: You are not the owner of the specified bucket, or you do not have the s3:PutIntelligentTieringConfiguration bucket permission to set the configuration on the bucket.).",
         "smithy.api#http": {
           "method": "PUT",
           "uri": "/{Bucket}?intelligent-tiering",

@@ -9705,7 +9762,7 @@
         "target": "com.amazonaws.s3#PutBucketLoggingRequest"
       },
       "traits": {
-        "smithy.api#documentation": "Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs. Grantee Values: You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways: By the person's ID: <>ID<><>GranteesEmail<> (DisplayName is optional and ignored in the request); By Email address: <>Grantees@email.com<> (the grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser); By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>. To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element: [...]. For more information about server access logging, see Server Access Logging. For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging. The following operations are related to PutBucketLogging: [...]",
+        "smithy.api#documentation": "Set the logging parameters for a bucket and to specify permissions for who can view and modify the logging parameters. All logs are saved to buckets in the same Amazon Web Services Region as the source bucket. To set the logging status of a bucket, you must be the bucket owner. The bucket owner is automatically granted FULL_CONTROL to all logs. You use the Grantee request element to grant access to other people. The Permissions request element specifies the kind of access the grantee has to the logs. If the target bucket for log delivery uses the bucket owner enforced setting for S3 Object Ownership, you can't use the Grantee request element to grant access to others. Permissions can only be granted using policies. For more information, see Permissions for server access log delivery in the Amazon S3 User Guide. Grantee Values: You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways: By the person's ID: <>ID<><>GranteesEmail<> (DisplayName is optional and ignored in the request); By Email address: <>Grantees@email.com<> (the grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser); By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>. To enable logging, you use LoggingEnabled and its children request elements. To disable logging, you use an empty BucketLoggingStatus request element: [...]. For more information about server access logging, see Server Access Logging in the Amazon S3 User Guide. For more information about creating a bucket, see CreateBucket. For more information about returning the logging status of a bucket, see GetBucketLogging. The following operations are related to PutBucketLogging: [...]",
         "smithy.api#http": {
           "method": "PUT",
           "uri": "/{Bucket}?logging",

@@ -9840,6 +9897,13 @@
           "smithy.api#documentation": "The account ID of the expected bucket owner. If the bucket is owned by a different account, the request will fail with an HTTP 403 (Access Denied) error.",
           "smithy.api#httpHeader": "x-amz-expected-bucket-owner"
         }
+      },
+      "SkipDestinationValidation": {
+        "target": "com.amazonaws.s3#SkipValidation",
+        "traits": {
+          "smithy.api#documentation": "Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or false value.",
+          "smithy.api#httpHeader": "x-amz-skip-destination-validation"
+        }
       }
     }
   },

@@ -9849,7 +9913,7 @@
         "target": "com.amazonaws.s3#PutBucketOwnershipControlsRequest"
       },
       "traits": {
-        "smithy.api#documentation": "Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying Permissions in a Policy. For information about Amazon S3 Object Ownership, see Using Object Ownership. The following operations are related to PutBucketOwnershipControls: [...]",
+        "smithy.api#documentation": "Creates or modifies OwnershipControls for an Amazon S3 bucket. To use this operation, you must have the s3:PutBucketOwnershipControls permission. For more information about Amazon S3 permissions, see Specifying permissions in a policy. For information about Amazon S3 Object Ownership, see Using object ownership. The following operations are related to PutBucketOwnershipControls: [...]",
         "smithy.api#http": {
           "method": "PUT",
           "uri": "/{Bucket}?ownershipControls",

@@ -9886,7 +9950,7 @@
       "OwnershipControls": {
         "target": "com.amazonaws.s3#OwnershipControls",
         "traits": {
-          "smithy.api#documentation": "The OwnershipControls (BucketOwnerPreferred or ObjectWriter) that you want to apply to this Amazon S3 bucket.",
+          "smithy.api#documentation": "The OwnershipControls (BucketOwnerEnforced, BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3 bucket.",
           "smithy.api#httpPayload": {},
           "smithy.api#required": {},
           "smithy.api#xmlName": "OwnershipControls"
@@ -10228,7 +10292,7 @@
         "target": "com.amazonaws.s3#PutObjectOutput"
       },
       "traits": {
-        "smithy.api#documentation": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it. Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead. To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value. To successfully complete the PutObject request, you must have the s3:PutObject in your IAM permissions. To successfully change the objects acl of your PutObject request, you must have the s3:PutObjectAcl in your IAM permissions. The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide. Server-side Encryption: You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption. If you request server-side encryption using Amazon Web Services Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. Access Control List (ACL)-Specific Request Headers: You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API. Storage Class Options: By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide. Versioning: If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning. Related Resources: [...]",
+        "smithy.api#documentation": "Adds an object to a bucket. You must have WRITE permissions on a bucket to add an object to it. Amazon S3 never adds partial objects; if you receive a success response, Amazon S3 added the entire object to the bucket. Amazon S3 is a distributed system. If it receives multiple write requests for the same object simultaneously, it overwrites all but the last object written. Amazon S3 does not provide object locking; if you need this, make sure to build it into your application layer or use versioning instead. To ensure that data is not corrupted traversing the network, use the Content-MD5 header. When you use this header, Amazon S3 checks the object against the provided MD5 value and, if they do not match, returns an error. Additionally, you can calculate the MD5 while putting an object to Amazon S3 and compare the returned ETag to the calculated MD5 value. To successfully complete the PutObject request, you must have the s3:PutObject in your IAM permissions. To successfully change the objects acl of your PutObject request, you must have the s3:PutObjectAcl in your IAM permissions. The Content-MD5 header is required for any request to upload an object with a retention period configured using Amazon S3 Object Lock. For more information about Amazon S3 Object Lock, see Amazon S3 Object Lock Overview in the Amazon S3 User Guide. Server-side Encryption: You can optionally request server-side encryption. With server-side encryption, Amazon S3 encrypts your data as it writes it to disks in its data centers and decrypts the data when you access it. You have the option to provide your own encryption key or use Amazon Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information, see Using Server-Side Encryption. If you request server-side encryption using Amazon Web Services Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the object-level. For more information, see Amazon S3 Bucket Keys in the Amazon S3 User Guide. Access Control List (ACL)-Specific Request Headers: You can use headers to grant ACL-based permissions. By default, all objects are private. Only the owner has full access control. When adding a new object, you can grant permissions to individual Amazon Web Services accounts or to predefined groups defined by Amazon S3. These permissions are then added to the ACL on the object. For more information, see Access Control List (ACL) Overview and Managing ACLs Using the REST API. If the bucket that you're uploading objects to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. Buckets that use this setting only accept PUT requests that don't specify an ACL or PUT requests that specify bucket owner full control ACLs, such as the bucket-owner-full-control canned ACL or an equivalent form of this ACL expressed in the XML format. PUT requests that contain other ACLs (for example, custom grants to certain Amazon Web Services accounts) fail and return a 400 error with the error code AccessControlListNotSupported. For more information, see Controlling ownership of objects and disabling ACLs in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for Object Ownership, all objects written to the bucket by any account will be owned by the bucket owner. Storage Class Options: By default, Amazon S3 uses the STANDARD Storage Class to store newly created objects. The STANDARD storage class provides high durability and high availability. Depending on performance needs, you can specify a different Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For more information, see Storage Classes in the Amazon S3 User Guide. Versioning: If you enable versioning for a bucket, Amazon S3 automatically generates a unique version ID for the object being stored. Amazon S3 returns this ID in the response. When you enable versioning for a bucket, if Amazon S3 receives multiple write requests for the same object simultaneously, it stores all of the objects. For more information about versioning, see Adding Objects to Versioning Enabled Buckets. For information about returning the versioning state of a bucket, see GetBucketVersioning. Related Resources: [...]",
         "smithy.api#http": {
           "method": "PUT",
           "uri": "/{Bucket}/{Key+}?x-id=PutObject",

@@ -10250,7 +10314,7 @@
         }
       ],
       "traits": {
-        "smithy.api#documentation": "Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide. This action is not supported by Amazon S3 on Outposts. Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide. Access Permissions: You can set access permissions using one of the following methods: Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL. Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. You specify each grantee as a type=value pair, where the type is one of the following: id (if the value specified is the canonical user ID of an Amazon Web Services account); uri (if you are granting permissions to a predefined group); emailAddress (if the value specified is the email address of an Amazon Web Services account; using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland), South America (São Paulo); for a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference). For example, the following x-amz-grant-read header grants list objects permission to the two Amazon Web Services accounts identified by their email addresses: x-amz-grant-read: emailAddress=\"xyz@amazon.com\", emailAddress=\"abc@amazon.com\". You can use either a canned ACL or specify access permissions explicitly. You cannot do both. Grantee Values: You can specify the person (grantee) to whom you're assigning access rights (using request elements) in the following ways: By the person's ID: <>ID<><>GranteesEmail<> (DisplayName is optional and ignored in the request); By URI: <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>; By Email address: <>Grantees@email.com<> (the grantee is resolved to the CanonicalUser and, in a response to a GET Object acl request, appears as the CanonicalUser; using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions: US East (N. Virginia), US West (N. California), US West (Oregon), Asia Pacific (Singapore), Asia Pacific (Sydney), Asia Pacific (Tokyo), Europe (Ireland), South America (São Paulo); for a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference). Versioning: The ACL of an object is set at the object version level. By default, PUT sets the ACL of the current version of an object. To set the ACL of a different version, use the versionId subresource. Related Resources: [...]",
+        "smithy.api#documentation": "Uses the acl subresource to set the access control list (ACL) permissions for a new or existing object in an S3 bucket. You must have WRITE_ACP permission to set the ACL of an object. For more information, see What permissions can I grant? in the Amazon S3 User Guide. This action is not supported by Amazon S3 on Outposts. Depending on your application needs, you can choose to set the ACL on an object using either the request body or the headers. For example, if you have an existing application that updates a bucket ACL using the request body, you can continue to use that approach. For more information, see Access Control List (ACL) Overview in the Amazon S3 User Guide. If your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs are disabled and no longer affect permissions. You must use policies to grant access to your bucket and the objects in it. Requests to set ACLs or update ACLs fail and return the AccessControlListNotSupported error code. Requests to read ACLs are still supported. For more information, see Controlling object ownership in the Amazon S3 User Guide. Access Permissions: You can set access permissions using one of the following methods: Specify a canned ACL with the x-amz-acl request header. Amazon S3 supports a set of predefined ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and permissions. Specify the canned ACL name as the value of x-amz-acl. If you use this header, you cannot use other access control-specific headers in your request. For more information, see Canned ACL. Specify access permissions explicitly with the x-amz-grant-read, x-amz-grant-read-acp, x-amz-grant-write-acp, and x-amz-grant-full-control headers. When using these headers, you specify explicit access permissions and grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the permission. If you use these ACL-specific headers, you cannot use x-amz-acl header to set a canned ACL. These parameters map to the set of permissions that Amazon S3 supports in an ACL. For more information, see Access Control List (ACL) Overview. You specify each grantee as a type=value pair, where the type is one of the

                                                                          \n id – if the value specified is the canonical user ID of an Amazon Web Services account

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n uri – if you are granting permissions to a predefined\n group

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          \n emailAddress – if the value specified is the email address of\n an Amazon Web Services account

                                                                          \n \n

                                                                          Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

                                                                          \n
                                                                            \n
                                                                          • \n

                                                                            US East (N. Virginia)

                                                                            \n
                                                                          • \n
                                                                          • \n

                                                                            US West (N. California)

                                                                            \n
                                                                          • \n
                                                                          • \n

                                                                            US West (Oregon)

                                                                            \n
                                                                          • \n
                                                                          • \n

                                                                            Asia Pacific (Singapore)

                                                                            \n
                                                                          • \n
                                                                          • \n

                                                                            Asia Pacific (Sydney)

                                                                            \n
                                                                          • \n
                                                                          • \n

                                                                            Asia Pacific (Tokyo)

                                                                            \n
                                                                          • \n
                                                                          • \n

                                                                            Europe (Ireland)

                                                                            \n
                                                                          • \n
                                                                          • \n

                                                                            South America (São Paulo)

                                                                            \n
                                                                          • \n
                                                                          \n

                                                                          For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

                                                                          \n
                                                                          \n
                                                                        • \n
                                                                        \n

                                                                        For example, the following x-amz-grant-read header grants list\n objects permission to the two Amazon Web Services accounts identified by their email\n addresses.

                                                                        \n

                                                                        \n x-amz-grant-read: emailAddress=\"xyz@amazon.com\",\n emailAddress=\"abc@amazon.com\" \n

                                                                        \n\n
                                                                      • \n
                                                                      \n

                                                                      You can use either a canned ACL or specify access permissions explicitly. You cannot do\n both.

                                                                      \n

                                                                      \n Grantee Values\n

                                                                      \n

                                                                      You can specify the person (grantee) to whom you're assigning access rights (using\n request elements) in the following ways:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        By the person's ID:

                                                                        \n

                                                                        \n <>ID<><>GranteesEmail<>\n \n

                                                                        \n

                                                                        DisplayName is optional and ignored in the request.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        By URI:

                                                                        \n

                                                                        \n <>http://acs.amazonaws.com/groups/global/AuthenticatedUsers<>\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        By Email address:

                                                                        \n

                                                                        \n <>Grantees@email.com<>lt;/Grantee>\n

                                                                        \n

                                                                        The grantee is resolved to the CanonicalUser and, in a response to a GET Object\n acl request, appears as the CanonicalUser.

                                                                        \n \n

                                                                        Using email addresses to specify a grantee is only supported in the following Amazon Web Services Regions:

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          US East (N. Virginia)

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          US West (N. California)

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          US West (Oregon)

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Asia Pacific (Singapore)

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Asia Pacific (Sydney)

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Asia Pacific (Tokyo)

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Europe (Ireland)

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          South America (São Paulo)

                                                                          \n
                                                                        • \n
                                                                        \n

                                                                        For a list of all the Amazon S3 supported Regions and endpoints, see Regions and Endpoints in the Amazon Web Services General Reference.

                                                                        \n
                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      \n Versioning\n

                                                                      \n

                                                                      The ACL of an object is set at the object version level. By default, PUT sets the ACL of\n the current version of an object. To set the ACL of a different version, use the\n versionId subresource.

                                                                      \n

                                                                      \n Related Resources\n
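For orientation, the documentation above corresponds to the S3 PutObjectAcl operation, which this SDK exposes as PutObjectAclCommand in @aws-sdk/client-s3. A minimal sketch of the two approaches it describes (a canned ACL versus explicit grant headers) might look like the following; the bucket name, object key, and grantee email addresses are placeholders, not values taken from this change.

import { S3Client, PutObjectAclCommand } from "@aws-sdk/client-s3";

const s3 = new S3Client({ region: "us-east-1" });

async function setObjectAcl(): Promise<void> {
  // Approach 1: a canned ACL, which maps to the x-amz-acl header.
  await s3.send(
    new PutObjectAclCommand({
      Bucket: "example-bucket",     // placeholder bucket name
      Key: "example-object.txt",    // placeholder object key
      ACL: "public-read",
    })
  );

  // Approach 2: explicit grants, which map to the x-amz-grant-* headers.
  // Per the documentation above, this cannot be combined with a canned ACL.
  await s3.send(
    new PutObjectAclCommand({
      Bucket: "example-bucket",
      Key: "example-object.txt",
      GrantRead: 'emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com"',
    })
  );
}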

                                                                      \n ", "smithy.api#http": { "method": "PUT", "uri": "/{Bucket}/{Key+}?acl", @@ -12160,6 +12224,9 @@ "com.amazonaws.s3#Size": { "type": "long" }, + "com.amazonaws.s3#SkipValidation": { + "type": "boolean" + }, "com.amazonaws.s3#SourceSelectionCriteria": { "type": "structure", "members": { @@ -12292,6 +12359,10 @@ { "value": "OUTPOSTS", "name": "OUTPOSTS" + }, + { + "value": "GLACIER_IR", + "name": "GLACIER_IR" } ] } @@ -12443,7 +12514,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      Container for granting information.

                                                                      " + "smithy.api#documentation": "

                                                                      Container for granting information.

                                                                      \n

                                                                      Buckets that use the bucket owner enforced setting for Object\n Ownership don't support target grants. For more information, see Permissions server access log delivery in the\n Amazon S3 User Guide.

                                                                      " } }, "com.amazonaws.s3#TargetGrants": { @@ -12603,6 +12674,10 @@ { "value": "DEEP_ARCHIVE", "name": "DEEP_ARCHIVE" + }, + { + "value": "GLACIER_IR", + "name": "GLACIER_IR" } ] } diff --git a/codegen/sdk-codegen/aws-models/snowball.json b/codegen/sdk-codegen/aws-models/snowball.json index b461a7e4e992..3af40ed12ae2 100644 --- a/codegen/sdk-codegen/aws-models/snowball.json +++ b/codegen/sdk-codegen/aws-models/snowball.json @@ -31,6 +31,21 @@ "shapes": { "com.amazonaws.snowball#AWSIESnowballJobManagementService": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Snowball", + "arnNamespace": "snowball", + "cloudFormationName": "Snowball", + "cloudTrailEventSource": "snowball.amazonaws.com", + "endpointPrefix": "snowball" + }, + "aws.auth#sigv4": { + "name": "snowball" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "

+        "smithy.api#documentation": "The Amazon Web Services Snow Family provides a petabyte-scale data transport solution that uses\n secure devices to transfer large amounts of data between your on-premises data centers and\n Amazon Simple Storage Service (Amazon S3). The Snow Family commands described here provide access to the same\n functionality that is available in the Amazon Web Services Snow Family Management Console, which enables you to create\n and manage jobs for a Snow Family device. To transfer data locally with a Snow Family device,\n you'll need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or OpsHub for Snow Family. For more information, see the User Guide.",
+        "smithy.api#title": "Amazon Import/Export Snowball"
+      },
      "version": "2016-06-30",
      "operations": [
        {
@@ -108,22 +123,7 @@
        {
          "target": "com.amazonaws.snowball#UpdateLongTermPricing"
        }
-      ],
-      "traits": {
-        "aws.api#service": {
-          "sdkId": "Snowball",
-          "arnNamespace": "snowball",
-          "cloudFormationName": "Snowball",
-          "cloudTrailEventSource": "snowball.amazonaws.com",
-          "endpointPrefix": "snowball"
-        },
-        "aws.auth#sigv4": {
-          "name": "snowball"
-        },
-        "aws.protocols#awsJson1_1": {},
-        "smithy.api#documentation": "AWS Snow Family is a petabyte-scale data transport solution that uses secure devices to\n transfer large amounts of data between your on-premises data centers and Amazon Simple Storage\n Service (Amazon S3). The Snow commands described here provide access to the same\n functionality that is available in the AWS Snow Family Management Console, which enables you to\n create and manage jobs for a Snow device. To transfer data locally with a Snow device, you'll\n need to use the Snowball Edge client or the Amazon S3 API Interface for Snowball or AWS OpsHub for Snow Family. For more\n information, see the User Guide.",
-        "smithy.api#title": "Amazon Import/Export Snowball"
-      }
+      ]
    },
    "com.amazonaws.snowball#Address": {
      "type": "structure",
@@ -214,7 +214,7 @@
        }
      },
      "traits": {
-        "smithy.api#documentation": "The address that you want the Snow device(s) associated with a specific job to\n be shipped to. Addresses are validated at the time of creation. The address you provide must\n be located within the serviceable area of your region. Although no individual elements of the\n Address are required, if the address is invalid or unsupported, then an\n exception is thrown."
+        "smithy.api#documentation": "The address that you want the Snow device(s) associated with a specific job to be\n shipped to. Addresses are validated at the time of creation. The address you provide must be\n located within the serviceable area of your region. Although no individual elements of the\n Address are required, if the address is invalid or unsupported, then an\n exception is thrown."
      }
    },
    "com.amazonaws.snowball#AddressId": {

@@ -402,13 +402,13 @@
        "KmsKeyARN": {
          "target": "com.amazonaws.snowball#KmsKeyARN",
          "traits": {
-            "smithy.api#documentation": "The KmsKeyARN Amazon Resource Name (ARN) associated with this cluster.\n This ARN was created using the CreateKey API action in AWS Key\n Management Service (AWS KMS)."
+            "smithy.api#documentation": "The KmsKeyARN Amazon Resource Name (ARN) associated with this cluster.\n This ARN was created using the CreateKey API action in Key Management Service (KMS."
          }
        },
        "RoleARN": {
          "target": "com.amazonaws.snowball#RoleARN",
          "traits": {
-            "smithy.api#documentation": "The role ARN associated with this cluster. This ARN was created using the CreateRole\n API action in AWS Identity and Access Management (IAM)."
+            "smithy.api#documentation": "The role ARN associated with this cluster. This ARN was created using the CreateRole\n API action in Identity and Access Management (IAM)."
          }
        },
        "ClusterState": {
@@ -426,7 +426,7 @@
        "SnowballType": {
          "target": "com.amazonaws.snowball#SnowballType",
          "traits": {
-            "smithy.api#documentation": "The type of AWS Snow device to use for this cluster. \n For cluster jobs, AWS Snow Family currently supports only the EDGE device type."
+            "smithy.api#documentation": "The type of Snowcone device to use for this cluster.\n For cluster jobs, Amazon Web Services Snow Family currently supports only the\n EDGE device type."
          }
        },
        "CreationDate": {
@@ -450,7 +450,7 @@
        "ShippingOption": {
          "target": "com.amazonaws.snowball#ShippingOption",
          "traits": {

-            "smithy.api#documentation": "The shipping speed for each node in this cluster. This speed doesn't dictate how soon\n you'll get each device, rather it represents how quickly each device moves to\n its destination while in transit. Regional shipping speeds are as follows: In Australia, you have access to express shipping. Typically, devices shipped\n express are delivered in about a day. In the European Union (EU), you have access to express shipping. Typically,\n Snow devices shipped express are delivered in about a day. In addition, most countries\n in the EU have access to standard shipping, which typically takes less than a week, one\n way. In India, Snow devices are delivered in one to seven days. In the US, you have access to one-day shipping and two-day shipping."
+            "smithy.api#documentation": "The shipping speed for each node in this cluster. This speed doesn't dictate how soon\n you'll get each device, rather it represents how quickly each device moves to its destination\n while in transit. Regional shipping speeds are as follows: In Australia, you have access to express shipping. Typically, devices shipped\n express are delivered in about a day. In the European Union (EU), you have access to express shipping. Typically, Snow\n devices shipped express are delivered in about a day. In addition, most countries in the\n EU have access to standard shipping, which typically takes less than a week, one\n way. In India, Snow devices are delivered in one to seven days. In the US, you have access to one-day shipping and two-day shipping."
          }
        },
        "Notification": {
@@ -468,13 +468,13 @@
        "TaxDocuments": {
          "target": "com.amazonaws.snowball#TaxDocuments",
          "traits": {
-            "smithy.api#documentation": "The tax documents required in your AWS Region."
+            "smithy.api#documentation": "The tax documents required in your Amazon Web Services Region."
          }
        },
        "OnDeviceServiceConfiguration": {
          "target": "com.amazonaws.snowball#OnDeviceServiceConfiguration",
          "traits": {
-            "smithy.api#documentation": "Represents metadata and configuration settings for services on an AWS Snow Family device."
+            "smithy.api#documentation": "Represents metadata and configuration settings for services on an Amazon Web Services Snow Family\n device."
          }
        }
      },
@@ -546,7 +546,7 @@
        }
      },
      "traits": {

-        "smithy.api#documentation": "You get this exception when you call CreateReturnShippingLabel more than once when other requests are not completed.",
+        "smithy.api#documentation": "You get this exception when you call CreateReturnShippingLabel more than once\n when other requests are not completed.",
        "smithy.api#error": "client"
      }
    },
@@ -588,7 +588,7 @@
        "AddressId": {
          "target": "com.amazonaws.snowball#String",
          "traits": {
-            "smithy.api#documentation": "The automatically generated ID for a specific address. You'll use this ID when you\n create a job to specify which address you want the Snow device for that job shipped to."
+            "smithy.api#documentation": "The automatically generated ID for a specific address. You'll use this ID when you\n create a job to specify which address you want the Snow device for that job shipped\n to."
          }
        }
      }
@@ -632,14 +632,14 @@
        "Resources": {
          "target": "com.amazonaws.snowball#JobResource",
          "traits": {
-            "smithy.api#documentation": "The resources associated with the cluster job. These resources include Amazon S3\n buckets and optional AWS Lambda functions written in the Python language.",
+            "smithy.api#documentation": "The resources associated with the cluster job. These resources include Amazon S3\n buckets and optional Lambda functions written in the Python language.",
            "smithy.api#required": {}
          }
        },
        "OnDeviceServiceConfiguration": {
          "target": "com.amazonaws.snowball#OnDeviceServiceConfiguration",
          "traits": {
-            "smithy.api#documentation": "Specifies the service or services on the Snow Family device that your\n transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System)."
+            "smithy.api#documentation": "Specifies the service or services on the Snow Family device that your transferred data\n will be exported from or imported into. Amazon Web Services Snow Family device clusters support Amazon S3 and NFS\n (Network File System)."
          }
        },
        "Description": {
@@ -658,27 +658,27 @@
        "KmsKeyARN": {
          "target": "com.amazonaws.snowball#KmsKeyARN",
          "traits": {
-            "smithy.api#documentation": "The KmsKeyARN value that you want to associate with this cluster.\n KmsKeyARN values are created by using the CreateKey API action in AWS Key\n Management Service (AWS KMS)."
+            "smithy.api#documentation": "The KmsKeyARN value that you want to associate with this cluster.\n KmsKeyARN values are created by using the CreateKey API action in Key Management Service (KMS)."
          }
        },
        "RoleARN": {
          "target": "com.amazonaws.snowball#RoleARN",
          "traits": {
-            "smithy.api#documentation": "The RoleARN that you want to associate with this cluster.\n RoleArn values are created by using the CreateRole API action in AWS\n Identity and Access Management (IAM).",
+            "smithy.api#documentation": "The RoleARN that you want to associate with this cluster.\n RoleArn values are created by using the CreateRole API action in Identity and Access Management (IAM).",
            "smithy.api#required": {}
          }
        },
        "SnowballType": {
          "target": "com.amazonaws.snowball#SnowballType",
          "traits": {

-            "smithy.api#documentation": "The type of AWS Snow Family device to use for this cluster. \n For cluster jobs, AWS Snow Family currently supports only the EDGE device type.\n For more information, see\n \"https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide or\n \"https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide.",
+            "smithy.api#documentation": "The type of Snow Family Devices to use for this cluster.\n For cluster jobs, Amazon Web Services Snow Family currently supports only the\n EDGE device type.\n For more information, see\n \"https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide or\n \"https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide.",
            "smithy.api#required": {}
          }
        },
        "ShippingOption": {
          "target": "com.amazonaws.snowball#ShippingOption",
          "traits": {
-            "smithy.api#documentation": "The shipping speed for each node in this cluster. This speed doesn't dictate how soon\n you'll get each Snowball Edge device, rather it represents how quickly each device moves to\n its destination while in transit. Regional shipping speeds are as follows:\n In Australia, you have access to express shipping. Typically, Snow devices shipped\n express are delivered in about a day. In the European Union (EU), you have access to express shipping. Typically,\n Snow devices shipped express are delivered in about a day. In addition, most countries in the\n EU have access to standard shipping, which typically takes less than a week, one\n way. In India, Snow devices are delivered in one to seven days. In the United States of America (US), you have access to one-day shipping and\n two-day shipping. In Australia, you have access to express shipping. Typically, devices shipped\n express are delivered in about a day. In the European Union (EU), you have access to express shipping. Typically,\n Snow devices shipped express are delivered in about a day. In addition, most countries\n in the EU have access to standard shipping, which typically takes less than a week, one\n way. In India, Snow devices are delivered in one to seven days. In the US, you have access to one-day shipping and two-day shipping.",
+            "smithy.api#documentation": "The shipping speed for each node in this cluster. This speed doesn't dictate how soon\n you'll get each Snowball Edge device, rather it represents how quickly each device moves to\n its destination while in transit. Regional shipping speeds are as follows: In Australia, you have access to express shipping. Typically, Snow devices shipped\n express are delivered in about a day. In the European Union (EU), you have access to express shipping. Typically, Snow\n devices shipped express are delivered in about a day. In addition, most countries in the\n EU have access to standard shipping, which typically takes less than a week, one\n way. In India, Snow devices are delivered in one to seven days. In the United States of America (US), you have access to one-day shipping and\n two-day shipping. In Australia, you have access to express shipping. Typically, devices shipped\n express are delivered in about a day. In the European Union (EU), you have access to express shipping. Typically, Snow\n devices shipped express are delivered in about a day. In addition, most countries in the\n EU have access to standard shipping, which typically takes less than a week, one\n way. In India, Snow devices are delivered in one to seven days. In the US, you have access to one-day shipping and two-day shipping.",
            "smithy.api#required": {}
          }
        },

@@ -697,7 +697,7 @@
        "TaxDocuments": {
          "target": "com.amazonaws.snowball#TaxDocuments",
          "traits": {
-            "smithy.api#documentation": "The tax documents required in your AWS Region."
+            "smithy.api#documentation": "The tax documents required in your Amazon Web Services Region."
          }
        },
        "RemoteManagement": {
@@ -745,7 +745,7 @@
        }
      ],
      "traits": {
-        "smithy.api#documentation": "Creates a job to import or export data between Amazon S3 and your on-premises data\n center. Your AWS account must have the right trust policies and permissions in place to create\n a job for a Snow device. If you're creating a job for a node in a cluster, you only need to provide\n the clusterId value; the other job attributes are inherited from the cluster.\n Only the Snowball; Edge device type is supported when ordering clustered jobs. The device capacity is optional. Availability of device types differ by AWS Region. For more information about Region\n availability, see AWS Regional Services.\n AWS Snow Family device types and their capacities.\n Snow Family device type: SNC1_SSD (Capacity: T14; Description: Snowcone). Snow Family device type: SNC1_HDD (Capacity: T8; Description: Snowcone). Device type: EDGE_S (Capacity: T98; Description: Snowball Edge Storage Optimized for data transfer only). Device type: EDGE_CG (Capacity: T42; Description: Snowball Edge Compute Optimized with GPU). Device type: EDGE_C (Capacity: T42; Description: Snowball Edge Compute Optimized without GPU). Device type: EDGE (Capacity: T100; Description: Snowball Edge Storage Optimized with EC2 Compute). Device type: STANDARD (Capacity: T50; Description: Original Snowball device; This device is only available in the Ningxia, Beijing, and Singapore AWS Regions.). Device type: STANDARD (Capacity: T80; Description: Original Snowball device; This device is only available in the Ningxia, Beijing, and Singapore AWS Regions.)."
+        "smithy.api#documentation": "Creates a job to import or export data between Amazon S3 and your on-premises data\n center. Your Amazon Web Services account must have the right trust policies and permissions in\n place to create a job for a Snow device. If you're creating a job for a node in a cluster, you\n only need to provide the clusterId value; the other job attributes are inherited\n from the cluster. Only the Snowball; Edge device type is supported when ordering clustered jobs. The device capacity is optional. Availability of device types differ by Amazon Web Services Region. For more information\n about Region availability, see Amazon Web Services Regional Services.\n Snow Family Devices and their capacities.\n Snow Family device type: SNC1_SSD (Capacity: T14; Description: Snowcone

                                                                          \n
                                                                        • \n
                                                                        \n\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Snow Family device type: SNC1_HDD\n

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          Capacity: T8

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Description: Snowcone

                                                                          \n
                                                                        • \n
                                                                        \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Device type: EDGE_S\n

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          Capacity: T98

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Description: Snowball Edge Storage Optimized for data transfer only

                                                                          \n
                                                                        • \n
                                                                        \n\n\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Device type: EDGE_CG\n

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          Capacity: T42

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Description: Snowball Edge Compute Optimized with GPU

                                                                          \n
                                                                        • \n
                                                                        \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Device type: EDGE_C\n

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          Capacity: T42

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Description: Snowball Edge Compute Optimized without GPU

                                                                          \n
                                                                        • \n
                                                                        \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Device type: EDGE\n

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          Capacity: T100

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Description: Snowball Edge Storage Optimized with EC2 Compute

                                                                          \n
                                                                        • \n
                                                                        \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Device type: STANDARD\n

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          Capacity: T50

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Description: Original Snowball device

                                                                          \n \n

                                                                          This device is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Region\n

                                                                          \n
                                                                          \n
                                                                        • \n
                                                                        \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Device type: STANDARD\n

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          Capacity: T80

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Description: Original Snowball device

                                                                          \n \n

                                                                          This device is only available in the Ningxia, Beijing, and Singapore Amazon Web Services Region.

                                                                          \n
                                                                          \n
                                                                        • \n
                                                                        \n

                                                                        \n
                                                                      • \n
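The hunk above only rewords the CreateJob documentation; the operation itself is unchanged. As an orientation aid that is not part of this patch, a minimal TypeScript sketch of calling CreateJob through the generated client follows; the region, address ID, role ARN, and KMS key ARN are placeholder values.

```ts
// Illustrative sketch only; the address ID and ARNs below are placeholders.
import { SnowballClient, CreateJobCommand } from "@aws-sdk/client-snowball";

async function createImportJob(): Promise<string | undefined> {
  const client = new SnowballClient({ region: "us-west-2" });

  const { JobId } = await client.send(
    new CreateJobCommand({
      JobType: "IMPORT",                  // import data into Amazon S3
      SnowballType: "EDGE",               // the only type supported for cluster jobs
      SnowballCapacityPreference: "T100",
      ShippingOption: "SECOND_DAY",
      Description: "Example import job",
      AddressId: "ADID00000000-0000-0000-0000-000000000000",           // placeholder
      RoleARN: "arn:aws:iam::123456789012:role/snowball-import-role",  // placeholder
      KmsKeyARN: "arn:aws:kms:us-west-2:123456789012:key/example-key", // placeholder
    })
  );
  return JobId;
}
```

As the documentation notes, a job created for a node in an existing cluster only needs ClusterId; the remaining attributes are inherited from the cluster.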
                                                                      " } }, "com.amazonaws.snowball#CreateJobRequest": { @@ -766,7 +766,7 @@ "OnDeviceServiceConfiguration": { "target": "com.amazonaws.snowball#OnDeviceServiceConfiguration", "traits": { - "smithy.api#documentation": "

                                                                      Specifies the service or services on the Snow Family device that your\n transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System).

                                                                      " + "smithy.api#documentation": "

                                                                      Specifies the service or services on the Snow Family device that your transferred data\n will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File\n System) and the Amazon Web Services Storage Gateway service Tape Gateway type.

                                                                      " } }, "Description": { @@ -784,13 +784,13 @@ "KmsKeyARN": { "target": "com.amazonaws.snowball#KmsKeyARN", "traits": { - "smithy.api#documentation": "

                                                                      The KmsKeyARN that you want to associate with this job.\n KmsKeyARNs are created using the CreateKey AWS Key Management\n Service (KMS) API action.

                                                                      " + "smithy.api#documentation": "

                                                                      The KmsKeyARN that you want to associate with this job.\n KmsKeyARNs are created using the CreateKey\n Key Management Service (KMS) API action.

                                                                      " } }, "RoleARN": { "target": "com.amazonaws.snowball#RoleARN", "traits": { - "smithy.api#documentation": "

                                                                      The RoleARN that you want to associate with this job.\n RoleArns are created using the CreateRole AWS Identity and\n Access Management (IAM) API action.

                                                                      " + "smithy.api#documentation": "

                                                                      The RoleARN that you want to associate with this job.\n RoleArns are created using the CreateRole\n Identity and Access Management (IAM) API action.

                                                                      " } }, "SnowballCapacityPreference": { @@ -802,7 +802,7 @@ "ShippingOption": { "target": "com.amazonaws.snowball#ShippingOption", "traits": { - "smithy.api#documentation": "

                                                                      The shipping speed for this job. This speed doesn't dictate how soon you'll get the\n Snow device, rather it represents how quickly the Snow device moves to its destination while in\n transit. Regional shipping speeds are as follows:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        In Australia, you have access to express shipping. Typically, Snow devices shipped\n express are delivered in about a day.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In the European Union (EU), you have access to express shipping. Typically,\n Snow devices shipped express are delivered in about a day. In addition, most countries in the\n EU have access to standard shipping, which typically takes less than a week, one\n way.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In India, Snow devices are delivered in one to seven days.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In the US, you have access to one-day shipping and two-day shipping.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      The shipping speed for this job. This speed doesn't dictate how soon you'll get the\n Snow device, rather it represents how quickly the Snow device moves to its destination while\n in transit. Regional shipping speeds are as follows:

                                                                      \n\n
                                                                        \n
                                                                      • \n

                                                                        In Australia, you have access to express shipping. Typically, Snow devices shipped\n express are delivered in about a day.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In the European Union (EU), you have access to express shipping. Typically, Snow\n devices shipped express are delivered in about a day. In addition, most countries in the\n EU have access to standard shipping, which typically takes less than a week, one\n way.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In India, Snow devices are delivered in one to seven days.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In the US, you have access to one-day shipping and two-day shipping.

                                                                        \n
                                                                      • \n
                                                                      " } }, "Notification": { @@ -820,7 +820,7 @@ "SnowballType": { "target": "com.amazonaws.snowball#SnowballType", "traits": { - "smithy.api#documentation": "

                                                                      The type of AWS Snow Family device to use for this job. \n

                                                                      \n \n

                                                                      For cluster jobs, AWS Snow Family currently supports only the EDGE device type.

                                                                      \n
                                                                      \n

                                                                      The type of AWS Snow device to use for this job. Currently, the only supported\n device type for cluster jobs is EDGE.

                                                                      \n

                                                                      For more information, see Snowball Edge Device\n Options in the Snowball Edge Developer Guide.

                                                                      \n \n

                                                                      For more information, see\n \"https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide or\n \"https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      The type of Snow Family Devices to use for this job.\n

                                                                      \n \n

                                                                      For cluster jobs, Amazon Web Services Snow Family currently supports only the\n EDGE device type.

                                                                      \n
                                                                      \n

                                                                      The type of Amazon Web Services Snow device to use for this job. Currently, the only\n supported device type for cluster jobs is EDGE.

                                                                      \n

                                                                      For more information, see Snowball Edge Device\n Options in the Snowball Edge Developer Guide.

                                                                      \n \n

                                                                      For more information, see\n \"https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide or\n \"https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide.

                                                                      " } }, "ForwardingAddressId": { @@ -832,13 +832,13 @@ "TaxDocuments": { "target": "com.amazonaws.snowball#TaxDocuments", "traits": { - "smithy.api#documentation": "

                                                                      The tax documents required in your AWS Region.

                                                                      " + "smithy.api#documentation": "

                                                                      The tax documents required in your Amazon Web Services Region.

                                                                      " } }, "DeviceConfiguration": { "target": "com.amazonaws.snowball#DeviceConfiguration", "traits": { - "smithy.api#documentation": "

                                                                      Defines the device configuration for an AWS Snowcone job.

                                                                      \n \n

                                                                      For more information, see\n \"https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide or\n \"https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      Defines the device configuration for an Snowcone job.

                                                                      \n \n

                                                                      For more information, see\n \"https://docs.aws.amazon.com/snowball/latest/snowcone-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide or\n \"https://docs.aws.amazon.com/snowball/latest/developer-guide/snow-device-types.html\" (Snow\n Family Devices and Capacity) in the Snowcone User Guide.

                                                                      " } }, "RemoteManagement": { @@ -880,7 +880,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Creates a job with the long-term usage option for a device. The long-term usage is a\n 1-year or 3-year long-term pricing type for the device. You are billed upfront, and AWS\n provides discounts for long-term pricing.\n

                                                                      " + "smithy.api#documentation": "

                                                                      Creates a job with the long-term usage option for a device. The long-term usage is a\n 1-year or 3-year long-term pricing type for the device. You are billed upfront, and Amazon Web Services provides discounts for long-term pricing.\n

                                                                      " } }, "com.amazonaws.snowball#CreateLongTermPricingRequest": { @@ -896,13 +896,13 @@ "IsLongTermPricingAutoRenew": { "target": "com.amazonaws.snowball#JavaBoolean", "traits": { - "smithy.api#documentation": "

                                                                      Specifies whether the current long-term pricing type for the device should be renewed.

                                                                      " + "smithy.api#documentation": "

                                                                      Specifies whether the current long-term pricing type for the device should be\n renewed.

                                                                      " } }, "SnowballType": { "target": "com.amazonaws.snowball#SnowballType", "traits": { - "smithy.api#documentation": "

                                                                      The type of AWS Snow Family device to use for the long-term pricing job.

                                                                      " + "smithy.api#documentation": "

                                                                      The type of Snow Family Devices to use for the long-term pricing job.

                                                                      " } } } @@ -944,7 +944,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Creates a shipping label that will be used to return the Snow device to AWS.

                                                                      " + "smithy.api#documentation": "

                                                                      Creates a shipping label that will be used to return the Snow device to Amazon Web Services.

                                                                      " } }, "com.amazonaws.snowball#CreateReturnShippingLabelRequest": { @@ -960,7 +960,7 @@ "ShippingOption": { "target": "com.amazonaws.snowball#ShippingOption", "traits": { - "smithy.api#documentation": "

                                                                      The shipping speed for a particular job. This speed doesn't dictate how soon the device is returned to AWS.\n This speed represents how quickly it moves to its\n destination while in transit. Regional shipping speeds are as follows:

                                                                      " + "smithy.api#documentation": "

                                                                      The shipping speed for a particular job. This speed doesn't dictate how soon the device\n is returned to Amazon Web Services. This speed represents how quickly it moves to its\n destination while in transit. Regional shipping speeds are as follows:

                                                                      " } } } @@ -971,7 +971,7 @@ "Status": { "target": "com.amazonaws.snowball#ShippingLabelStatus", "traits": { - "smithy.api#documentation": "

                                                                      The status information of the task on a Snow device that is being returned to AWS.

                                                                      " + "smithy.api#documentation": "

                                                                      The status information of the task on a Snow device that is being returned to Amazon Web Services.

                                                                      " } } } @@ -994,18 +994,18 @@ "TotalBytes": { "target": "com.amazonaws.snowball#Long", "traits": { - "smithy.api#documentation": "

                                                                      The total bytes of data for a transfer between a Snow device and Amazon S3. This value is\n set to 0 (zero) until all the keys that will be transferred have been listed.

                                                                      " + "smithy.api#documentation": "

                                                                      The total bytes of data for a transfer between a Snow device and Amazon S3. This value\n is set to 0 (zero) until all the keys that will be transferred have been listed.

                                                                      " } }, "TotalObjects": { "target": "com.amazonaws.snowball#Long", "traits": { - "smithy.api#documentation": "

                                                                      The total number of objects for a transfer between a Snow device and Amazon S3. This value\n is set to 0 (zero) until all the keys that will be transferred have been listed.

                                                                      " + "smithy.api#documentation": "

                                                                      The total number of objects for a transfer between a Snow device and Amazon S3. This\n value is set to 0 (zero) until all the keys that will be transferred have been\n listed.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      Defines the real-time status of a Snow device's data transfer while the device is at AWS.\n This data is only available while a job has a JobState value of\n InProgress, for both import and export jobs.

                                                                      " + "smithy.api#documentation": "

                                                                      Defines the real-time status of a Snow device's data transfer while the device is at\n Amazon Web Services. This data is only available while a job has a JobState\n value of InProgress, for both import and export jobs.

                                                                      " } }, "com.amazonaws.snowball#DescribeAddress": { @@ -1043,7 +1043,7 @@ "Address": { "target": "com.amazonaws.snowball#Address", "traits": { - "smithy.api#documentation": "

                                                                      The address that you want the Snow device(s) associated with a specific job to\n be shipped to.

                                                                      " + "smithy.api#documentation": "

                                                                      The address that you want the Snow device(s) associated with a specific job to be\n shipped to.

                                                                      " } } } @@ -1214,7 +1214,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Information on the shipping label of a Snow device that is being returned to AWS.

                                                                      " + "smithy.api#documentation": "

                                                                      Information on the shipping label of a Snow device that is being returned to Amazon Web Services.

                                                                      " } }, "com.amazonaws.snowball#DescribeReturnShippingLabelRequest": { @@ -1223,7 +1223,7 @@ "JobId": { "target": "com.amazonaws.snowball#JobId", "traits": { - "smithy.api#documentation": "

                                                                      The automatically generated ID for a job, for example\n JID123e4567-e89b-12d3-a456-426655440000.

                                                                      ", + "smithy.api#documentation": "

                                                                      The automatically generated ID for a job, for example\n JID123e4567-e89b-12d3-a456-426655440000.

                                                                      ", "smithy.api#required": {} } } @@ -1235,7 +1235,7 @@ "Status": { "target": "com.amazonaws.snowball#ShippingLabelStatus", "traits": { - "smithy.api#documentation": "

                                                                      The status information of the task on a Snow device that is being returned to AWS.

                                                                      " + "smithy.api#documentation": "

                                                                      The status information of the task on a Snow device that is being returned to Amazon Web Services.

                                                                      " } }, "ExpirationDate": { @@ -1243,6 +1243,12 @@ "traits": { "smithy.api#documentation": "

                                                                      The expiration date of the current return shipping label.

                                                                      " } + }, + "ReturnShippingLabelURI": { + "target": "com.amazonaws.snowball#String", + "traits": { + "smithy.api#documentation": "

                                                                      The pre-signed Amazon S3 URI used to download the return shipping label.
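The hunk above adds the new ReturnShippingLabelURI member to the DescribeReturnShippingLabel result. As a hedged sketch that is not part of this patch, reading the new field through the generated TypeScript client could look like the following; the job ID is a placeholder.

```ts
// Illustrative sketch only; the job ID passed in is a placeholder.
import { SnowballClient, DescribeReturnShippingLabelCommand } from "@aws-sdk/client-snowball";

async function getReturnShippingLabelUri(jobId: string): Promise<string | undefined> {
  const client = new SnowballClient({ region: "us-west-2" });
  const { Status, ExpirationDate, ReturnShippingLabelURI } = await client.send(
    new DescribeReturnShippingLabelCommand({ JobId: jobId })
  );
  console.log(`Label status: ${Status}, expires: ${ExpirationDate?.toISOString()}`);
  return ReturnShippingLabelURI; // pre-signed Amazon S3 URI for downloading the label
}
```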

                                                                      " + } } } }, @@ -1252,7 +1258,7 @@ "SnowconeDeviceConfiguration": { "target": "com.amazonaws.snowball#SnowconeDeviceConfiguration", "traits": { - "smithy.api#documentation": "

                                                                      Returns information about the device configuration for an AWS Snowcone job.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns information about the device configuration for an Snowcone\n job.

                                                                      " } } }, @@ -1293,7 +1299,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      A JSON-formatted object that contains the IDs for an Amazon Machine Image (AMI),\n including the Amazon EC2 AMI ID and the Snow device AMI ID. Each AMI has these two IDs to\n simplify identifying the AMI in both the AWS Cloud and on the device.

                                                                      " + "smithy.api#documentation": "

                                                                      A JSON-formatted object that contains the IDs for an Amazon Machine Image (AMI),\n including the Amazon EC2 AMI ID and the Snow device AMI ID. Each AMI has these two IDs to\n simplify identifying the AMI in both the Amazon Web Services Cloud and on the device.

                                                                      " } }, "com.amazonaws.snowball#Ec2AmiResourceList": { @@ -1320,7 +1326,7 @@ "EventResourceARN": { "target": "com.amazonaws.snowball#ResourceARN", "traits": { - "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) for any local Amazon S3 resource that is an AWS Lambda\n function's event trigger associated with this job.

                                                                      " + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) for any local Amazon S3 resource that is an Lambda function's event trigger associated with this job.

                                                                      " } } }, @@ -1357,7 +1363,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns a link to an Amazon S3 presigned URL for the manifest file associated with the\n specified JobId value. You can access the manifest file for up to 60 minutes\n after this request has been made. To access the manifest file after 60 minutes have passed,\n you'll have to make another call to the GetJobManifest action.

                                                                      \n\n

                                                                      The manifest is an encrypted file that you can download after your job enters the\n WithCustomer status. The manifest is decrypted by using the\n UnlockCode code value, when you pass both values to the Snow device through the\n Snowball client when the client is started for the first time.

                                                                      \n\n\n

                                                                      As a best practice, we recommend that you don't save a copy of an\n UnlockCode value in the same location as the manifest file for that job. Saving\n these separately helps prevent unauthorized parties from gaining access to the Snow device\n associated with that job.

                                                                      \n\n\n

                                                                      The credentials of a given job, including its manifest file and unlock code, expire 360\n days after the job is created.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns a link to an Amazon S3 presigned URL for the manifest file associated with the\n specified JobId value. You can access the manifest file for up to 60 minutes\n after this request has been made. To access the manifest file after 60 minutes have passed,\n you'll have to make another call to the GetJobManifest action.

                                                                      \n\n

                                                                      The manifest is an encrypted file that you can download after your job enters the\n WithCustomer status. The manifest is decrypted by using the\n UnlockCode code value, when you pass both values to the Snow device through the\n Snowball client when the client is started for the first time.

                                                                      \n\n\n

                                                                      As a best practice, we recommend that you don't save a copy of an\n UnlockCode value in the same location as the manifest file for that job. Saving\n these separately helps prevent unauthorized parties from gaining access to the Snow device\n associated with that job.

                                                                      \n\n\n

                                                                      The credentials of a given job, including its manifest file and unlock code, expire 360\n days after the job is created.

                                                                      " } }, "com.amazonaws.snowball#GetJobManifestRequest": { @@ -1400,7 +1406,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns the UnlockCode code value for the specified job. A particular\n UnlockCode value can be accessed for up to 360 days after the associated job\n has been created.

                                                                      \n\n

                                                                      The UnlockCode value is a 29-character code with 25 alphanumeric\n characters and 4 hyphens. This code is used to decrypt the manifest file when it is passed\n along with the manifest to the Snow device through the Snowball client when the client is started\n for the first time.

                                                                      \n\n

                                                                      As a best practice, we recommend that you don't save a copy of the\n UnlockCode in the same location as the manifest file for that job. Saving these\n separately helps prevent unauthorized parties from gaining access to the Snow device associated\n with that job.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns the UnlockCode code value for the specified job. A particular\n UnlockCode value can be accessed for up to 360 days after the associated job\n has been created.

                                                                      \n\n

                                                                      The UnlockCode value is a 29-character code with 25 alphanumeric\n characters and 4 hyphens. This code is used to decrypt the manifest file when it is passed\n along with the manifest to the Snow device through the Snowball client when the client is\n started for the first time.

                                                                      \n\n

                                                                      As a best practice, we recommend that you don't save a copy of the\n UnlockCode in the same location as the manifest file for that job. Saving these\n separately helps prevent unauthorized parties from gaining access to the Snow device\n associated with that job.
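The GetJobManifest and GetJobUnlockCode operations documented above are typically used together when preparing to unlock a device. The following is a hedged TypeScript sketch, not part of this patch; the job ID is a placeholder.

```ts
// Illustrative sketch only; the job ID passed in is a placeholder.
import {
  SnowballClient,
  GetJobManifestCommand,
  GetJobUnlockCodeCommand,
} from "@aws-sdk/client-snowball";

async function fetchManifestAndUnlockCode(jobId: string): Promise<void> {
  const client = new SnowballClient({ region: "us-west-2" });

  // Presigned URL for the encrypted manifest; accessible for 60 minutes after this call.
  const { ManifestURI } = await client.send(new GetJobManifestCommand({ JobId: jobId }));

  // 29-character code used to decrypt the manifest when the Snowball client starts.
  const { UnlockCode } = await client.send(new GetJobUnlockCodeCommand({ JobId: jobId }));

  // Per the guidance above, store these two values in separate locations.
  console.log(ManifestURI, UnlockCode);
}
```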

                                                                      " } }, "com.amazonaws.snowball#GetJobUnlockCodeRequest": { @@ -1435,7 +1441,7 @@ "target": "com.amazonaws.snowball#GetSnowballUsageResult" }, "traits": { - "smithy.api#documentation": "

                                                                      Returns information about the Snow Family service limit for your account, and also the\n number of Snow devices your account has in use.

                                                                      \n\n

                                                                      The default service limit for the number of Snow devices that you can have at one time is\n 1. If you want to increase your service limit, contact AWS Support.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns information about the Snow Family service limit for your account, and also the\n number of Snow devices your account has in use.

                                                                      \n\n

                                                                      The default service limit for the number of Snow devices that you can have at one time\n is 1. If you want to increase your service limit, contact Amazon Web Services Support.
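As a hedged illustration that is not part of this patch, the service-limit check described above can be issued with no request parameters:

```ts
// Illustrative sketch only.
import { SnowballClient, GetSnowballUsageCommand } from "@aws-sdk/client-snowball";

async function checkSnowballUsage(): Promise<void> {
  const client = new SnowballClient({ region: "us-west-2" });
  const { SnowballLimit, SnowballsInUse } = await client.send(new GetSnowballUsageCommand({}));
  console.log(`Using ${SnowballsInUse} of ${SnowballLimit} allowed Snow devices`);
}
```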

                                                                      " } }, "com.amazonaws.snowball#GetSnowballUsageRequest": { @@ -1476,7 +1482,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns an Amazon S3 presigned URL for an update file associated with a specified\n JobId.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns an Amazon S3 presigned URL for an update file associated with a specified\n JobId.

                                                                      " } }, "com.amazonaws.snowball#GetSoftwareUpdatesRequest": { @@ -1485,7 +1491,7 @@ "JobId": { "target": "com.amazonaws.snowball#JobId", "traits": { - "smithy.api#documentation": "

                                                                      The ID for a job that you want to get the software update file for, for example\n JID123e4567-e89b-12d3-a456-426655440000.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID for a job that you want to get the software update file for, for example\n JID123e4567-e89b-12d3-a456-426655440000.

                                                                      ", "smithy.api#required": {} } } @@ -1497,7 +1503,7 @@ "UpdatesURI": { "target": "com.amazonaws.snowball#String", "traits": { - "smithy.api#documentation": "

                                                                      The Amazon S3 presigned URL for the update file associated with the specified\n JobId value. The software update will be available for 2 days after this request is made. \n To access an update after the 2 days have passed, you'll have to make another call to GetSoftwareUpdates.

                                                                      " + "smithy.api#documentation": "

                                                                      The Amazon S3 presigned URL for the update file associated with the specified\n JobId value. The software update will be available for 2 days after this\n request is made. To access an update after the 2 days have passed, you'll have to make another\n call to GetSoftwareUpdates.
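A hedged sketch, not part of this patch, of retrieving the presigned update-file URL described above; the job ID is a placeholder.

```ts
// Illustrative sketch only; the job ID passed in is a placeholder.
import { SnowballClient, GetSoftwareUpdatesCommand } from "@aws-sdk/client-snowball";

async function getUpdateFileUrl(jobId: string): Promise<string | undefined> {
  const client = new SnowballClient({ region: "us-west-2" });
  const { UpdatesURI } = await client.send(new GetSoftwareUpdatesCommand({ JobId: jobId }));
  // The URL stays valid for 2 days; after that, call GetSoftwareUpdates again.
  return UpdatesURI;
}
```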

                                                                      " } } } @@ -1508,12 +1514,12 @@ "GSTIN": { "target": "com.amazonaws.snowball#GSTIN", "traits": { - "smithy.api#documentation": "

                                                                      The Goods and Services Tax (GST) documents required in AWS Regions in India.

                                                                      " + "smithy.api#documentation": "

                                                                      The Goods and Services Tax (GST) documents required in Amazon Web Services Region in\n India.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      The tax documents required in AWS Regions in India.

                                                                      " + "smithy.api#documentation": "

                                                                      The tax documents required in Amazon Web Services Region in India.

                                                                      " } }, "com.amazonaws.snowball#Integer": { @@ -1542,7 +1548,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      Job or cluster creation failed. One or more inputs were invalid. Confirm that the\n CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType, and try again.

                                                                      ", + "smithy.api#documentation": "

                                                                      Job or cluster creation failed. One or more inputs were invalid. Confirm that the CreateClusterRequest$SnowballType value supports your CreateJobRequest$JobType, and try again.

                                                                      ", "smithy.api#error": "client" } }, @@ -1622,7 +1628,7 @@ "IsMaster": { "target": "com.amazonaws.snowball#Boolean", "traits": { - "smithy.api#documentation": "

                                                                      A value that indicates that this job is a main job. A main job represents a\n successful request to create an export job. Main jobs aren't associated with any Snowballs.\n Instead, each main job will have at least one job part, and each job part is associated with\n a Snowball. It might take some time before the job parts associated with a particular main\n job are listed, because they are created after the main job is created.

                                                                      " + "smithy.api#documentation": "

                                                                      A value that indicates that this job is a main job. A main job represents a successful\n request to create an export job. Main jobs aren't associated with any Snowballs. Instead, each\n main job will have at least one job part, and each job part is associated with a Snowball. It\n might take some time before the job parts associated with a particular main job are listed,\n because they are created after the main job is created.

                                                                      " } }, "JobType": { @@ -1683,7 +1689,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      Contains job logs. Whenever a Snow device is used to import data into or export data out of\n Amazon S3, you'll have the option of downloading a PDF job report. Job logs are returned as a\n part of the response syntax of the DescribeJob action in the\n JobMetadata data type. The job logs can be accessed for up to 60 minutes after\n this request has been made. To access any of the job logs after 60 minutes have passed, you'll\n have to make another call to the DescribeJob action.

                                                                      \n\n

                                                                      For import jobs, the PDF job report becomes available at the end of the import process.\n For export jobs, your job report typically becomes available while the Snow device for your job\n part is being delivered to you.

                                                                      \n\n

                                                                      The job report provides you insight into the state of your Amazon S3 data transfer. The\n report includes details about your job or job part for your records.

                                                                      \n\n

                                                                      For deeper visibility into the status of your transferred objects, you can look at the\n two associated logs: a success log and a failure log. The logs are saved in comma-separated\n value (CSV) format, and the name of each log includes the ID of the job or job part that the\n log describes.

                                                                      " + "smithy.api#documentation": "

                                                                      Contains job logs. Whenever a Snow device is used to import data into or export data\n out of Amazon S3, you'll have the option of downloading a PDF job report. Job logs are\n returned as a part of the response syntax of the DescribeJob action in the\n JobMetadata data type. The job logs can be accessed for up to 60 minutes after\n this request has been made. To access any of the job logs after 60 minutes have passed, you'll\n have to make another call to the DescribeJob action.

                                                                      \n\n

                                                                      For import jobs, the PDF job report becomes available at the end of the import process.\n For export jobs, your job report typically becomes available while the Snow device for your\n job part is being delivered to you.

                                                                      \n\n

                                                                      The job report provides you insight into the state of your Amazon S3 data transfer. The\n report includes details about your job or job part for your records.

                                                                      \n\n

                                                                      For deeper visibility into the status of your transferred objects, you can look at the\n two associated logs: a success log and a failure log. The logs are saved in comma-separated\n value (CSV) format, and the name of each log includes the ID of the job or job part that the\n log describes.

                                                                      " } }, "com.amazonaws.snowball#JobMetadata": { @@ -1734,13 +1740,13 @@ "KmsKeyARN": { "target": "com.amazonaws.snowball#KmsKeyARN", "traits": { - "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) for the AWS Key Management Service (AWS KMS) key\n associated with this job. This ARN was created using the CreateKey API action in AWS\n KMS.

                                                                      " + "smithy.api#documentation": "

                                                                      The Amazon Resource Name (ARN) for the Key Management Service (KMS) key\n associated with this job. This ARN was created using the CreateKey API action in KMS.

                                                                      " } }, "RoleARN": { "target": "com.amazonaws.snowball#RoleARN", "traits": { - "smithy.api#documentation": "

                                                                      The role ARN associated with this job. This ARN was created using the CreateRole\n API action in AWS Identity and Access Management (IAM).

                                                                      " + "smithy.api#documentation": "

                                                                      The role ARN associated with this job. This ARN was created using the CreateRole\n API action in Identity and Access Management.

                                                                      " } }, "AddressId": { @@ -1770,13 +1776,13 @@ "DataTransferProgress": { "target": "com.amazonaws.snowball#DataTransfer", "traits": { - "smithy.api#documentation": "

                                                                      A value that defines the real-time status of a Snow device's data transfer while the\n device is at AWS. This data is only available while a job has a JobState value of\n InProgress, for both import and export jobs.

                                                                      " + "smithy.api#documentation": "

                                                                      A value that defines the real-time status of a Snow device's data transfer while the\n device is at Amazon Web Services. This data is only available while a job has a\n JobState value of InProgress, for both import and export\n jobs.

                                                                      " } }, "JobLogInfo": { "target": "com.amazonaws.snowball#JobLogs", "traits": { - "smithy.api#documentation": "

                                                                      Links to Amazon S3 presigned URLs for the job report and logs. For import jobs, the PDF\n job report becomes available at the end of the import process. For export jobs, your job\n report typically becomes available while the Snow device for your job part is being delivered to\n you.

                                                                      " + "smithy.api#documentation": "

                                                                      Links to Amazon S3 presigned URLs for the job report and logs. For import jobs, the PDF\n job report becomes available at the end of the import process. For export jobs, your job\n report typically becomes available while the Snow device for your job part is being delivered\n to you.

                                                                      " } }, "ClusterId": { @@ -1794,7 +1800,7 @@ "TaxDocuments": { "target": "com.amazonaws.snowball#TaxDocuments", "traits": { - "smithy.api#documentation": "

                                                                      The metadata associated with the tax documents required in your AWS Region.

                                                                      " + "smithy.api#documentation": "

                                                                      The metadata associated with the tax documents required in your Amazon Web Services Region.

                                                                      " } }, "DeviceConfiguration": { @@ -1815,7 +1821,7 @@ "OnDeviceServiceConfiguration": { "target": "com.amazonaws.snowball#OnDeviceServiceConfiguration", "traits": { - "smithy.api#documentation": "

                                                                      Represents metadata and configuration settings for services on an AWS Snow Family device.

                                                                      " + "smithy.api#documentation": "

                                                                      Represents metadata and configuration settings for services on an Amazon Web Services Snow Family\n device.

                                                                      " } } }, @@ -1852,7 +1858,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      Contains an array of AWS resource objects. Each object represents an Amazon S3 bucket,\n an AWS Lambda function, or an Amazon Machine Image (AMI) based on Amazon EC2 that is\n associated with a particular job.

                                                                      " + "smithy.api#documentation": "

                                                                      Contains an array of Amazon Web Services resource objects. Each object represents an\n Amazon S3 bucket, an Lambda function, or an Amazon Machine Image (AMI) based\n on Amazon EC2 that is associated with a particular job.

                                                                      " } }, "com.amazonaws.snowball#JobState": { @@ -1947,7 +1953,7 @@ } }, "traits": { - "smithy.api#documentation": "

-          "smithy.api#documentation": "The provided AWS Key Management Service key lacks the permissions to perform the\n specified CreateJob or UpdateJob action.",
+          "smithy.api#documentation": "The provided Key Management Service key lacks the permissions to perform the specified\n CreateJob or UpdateJob action.",
          "smithy.api#error": "client"
        } },
@@ -1987,7 +1993,7 @@
        "LambdaArn": { "target": "com.amazonaws.snowball#ResourceARN", "traits": {
-          "smithy.api#documentation": "An Amazon Resource Name (ARN) that represents an AWS Lambda function to be triggered by\n PUT object actions on the associated local Amazon S3 resource."
+          "smithy.api#documentation": "An Amazon Resource Name (ARN) that represents an Lambda function to be\n triggered by PUT object actions on the associated local Amazon S3 resource."
        } },
        "EventTriggers": {
@@ -2136,7 +2142,7 @@
        } ], "traits": {
-          "smithy.api#documentation": "This action returns a list of the different Amazon EC2 Amazon Machine Images (AMIs)\n that are owned by your AWS account that would be supported for use on a Snow device.\n Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM, Ubuntu Server\n 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the AWS\n Marketplace."
+          "smithy.api#documentation": "This action returns a list of the different Amazon EC2 Amazon Machine Images (AMIs)\n that are owned by your Amazon Web Services account that would be supported for use on a Snow\n device. Currently, supported AMIs are based on the CentOS 7 (x86_64) - with Updates HVM,\n Ubuntu Server 14.04 LTS (HVM), and Ubuntu 16.04 LTS - Xenial (HVM) images, available on the\n Amazon Web Services Marketplace."
        } },
      "com.amazonaws.snowball#ListCompatibleImagesRequest": {
@@ -2272,7 +2278,7 @@
        "NextToken": { "target": "com.amazonaws.snowball#String", "traits": {
-          "smithy.api#documentation": "Because HTTP requests are stateless, this is the starting point for your next list of\n ListLongTermPricing to return."
+          "smithy.api#documentation": "Because HTTP requests are stateless, this is the starting point for your next list of\n ListLongTermPricing to return."
        } } }
@@ -2373,7 +2379,7 @@
        "SnowballType": { "target": "com.amazonaws.snowball#SnowballType", "traits": {
-          "smithy.api#documentation": "The type of AWS Snow Family device associated with this long-term pricing job."
+          "smithy.api#documentation": "The type of Snow Family Devices associated with this long-term pricing job."
        } },
        "JobIds": {
@@ -2384,7 +2390,7 @@
        } }, "traits": {
-          "smithy.api#documentation": "Each LongTermPricingListEntry object contains information about a long-term pricing type."
+          "smithy.api#documentation": "Each LongTermPricingListEntry object contains information about a long-term\n pricing type."
        } },
      "com.amazonaws.snowball#LongTermPricingType": {
@@ -2408,7 +2414,7 @@
        "StorageLimit": { "target": "com.amazonaws.snowball#StorageLimit", "traits": {
-          "smithy.api#documentation": "The maximum NFS storage for one Snowball Family device."
+          "smithy.api#documentation": "The maximum NFS storage for one Snow Family device."
        } },
        "StorageUnit": {
@@ -2419,7 +2425,7 @@
        } }, "traits": {
-          "smithy.api#documentation": "An object that represents metadata and configuration settings for NFS service on an AWS Snow Family device."
+          "smithy.api#documentation": "An object that represents the metadata and configuration settings for the NFS (Network\n File System) service on an Amazon Web Services Snow Family device."
        } },
      "com.amazonaws.snowball#Notification": {
@@ -2428,7 +2434,7 @@
        "SnsTopicARN": { "target": "com.amazonaws.snowball#SnsTopicARN", "traits": {
-          "smithy.api#documentation": "The new SNS TopicArn that you want to associate with this job. You can\n create Amazon Resource Names (ARNs) for topics by using the CreateTopic Amazon SNS API\n action.\n\n You can subscribe email addresses to an Amazon SNS topic through the AWS Management\n Console, or by using the Subscribe Amazon Simple Notification Service (Amazon SNS) API action."
+          "smithy.api#documentation": "The new SNS TopicArn that you want to associate with this job. You can\n create Amazon Resource Names (ARNs) for topics by using the CreateTopic Amazon SNS API\n action.\n\n You can subscribe email addresses to an Amazon SNS topic through the Amazon Web Services Management Console, or by using the Subscribe Amazon Simple Notification\n Service (Amazon SNS) API action."
        } },
        "JobStatesToNotify": {
@@ -2454,12 +2460,18 @@
        "NFSOnDeviceService": { "target": "com.amazonaws.snowball#NFSOnDeviceServiceConfiguration", "traits": {
-          "smithy.api#documentation": "Represents the NFS service on a Snow Family device."
+          "smithy.api#documentation": "Represents the NFS (Network File System) service on a Snow Family device."
+        }
+      },
+      "TGWOnDeviceService": {
+        "target": "com.amazonaws.snowball#TGWOnDeviceServiceConfiguration",
+        "traits": {
+          "smithy.api#documentation": "Represents the Storage Gateway service Tape Gateway type on a Snow Family device."
        } } },
        "traits": {
-          "smithy.api#documentation": "An object that represents metadata and configuration settings for services on an AWS Snow Family device."
+          "smithy.api#documentation": "An object that represents the metadata and configuration settings for services on an Amazon Web Services\n Snow Family device."
        } },
      "com.amazonaws.snowball#RemoteManagement": {
@@ -2495,7 +2507,7 @@
        } }, "traits": {
-          "smithy.api#documentation": "You get this exception if you call CreateReturnShippingLabel and a valid return\n shipping label already exists. In this case, use\n DescribeReturnShippingLabel to get the url.",
+          "smithy.api#documentation": "You get this exception if you call CreateReturnShippingLabel and a valid\n return shipping label already exists. In this case, use\n DescribeReturnShippingLabel to get the URL.",
          "smithy.api#error": "client"
        } },
@@ -2527,7 +2539,7 @@
        "TargetOnDeviceServices": { "target": "com.amazonaws.snowball#TargetOnDeviceServiceList", "traits": {
-          "smithy.api#documentation": "Specifies the service or services on the Snow Family device that your\n transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System)."
+          "smithy.api#documentation": "Specifies the service or services on the Snow Family device that your transferred data\n will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File\n System)."
        } } },
@@ -2582,13 +2594,13 @@
        "ShippingOption": { "target": "com.amazonaws.snowball#ShippingOption", "traits": {
-          "smithy.api#documentation": "The shipping speed for a particular job. This speed doesn't dictate how soon you'll get\n the Snow device from the job's creation date. This speed represents how quickly it moves to its\n destination while in transit. Regional shipping speeds are as follows: • In Australia, you have access to express shipping. Typically, Snow devices shipped\n express are delivered in about a day. • In the European Union (EU), you have access to express shipping. Typically,\n Snow devices shipped express are delivered in about a day. In addition, most countries in the\n EU have access to standard shipping, which typically takes less than a week, one\n way. • In India, Snow devices are delivered in one to seven days. • In the United States of America (US), you have access to one-day shipping and\n two-day shipping."
+          "smithy.api#documentation": "The shipping speed for a particular job. This speed doesn't dictate how soon you'll get\n the Snow device from the job's creation date. This speed represents how quickly it moves to\n its destination while in transit. Regional shipping speeds are as follows: • In Australia, you have access to express shipping. Typically, Snow devices shipped\n express are delivered in about a day. • In the European Union (EU), you have access to express shipping. Typically, Snow\n devices shipped express are delivered in about a day. In addition, most countries in the\n EU have access to standard shipping, which typically takes less than a week, one\n way. • In India, Snow devices are delivered in one to seven days. • In the United States of America (US), you have access to one-day shipping and\n two-day shipping."
        } },
        "InboundShipment": { "target": "com.amazonaws.snowball#Shipment", "traits": {
-          "smithy.api#documentation": "The Status and TrackingNumber values for a Snow device being\n returned to AWS for a particular job."
+          "smithy.api#documentation": "The Status and TrackingNumber values for a Snow device being\n returned to Amazon Web Services for a particular job."
        } },
        "OutboundShipment": {
@@ -2728,12 +2740,12 @@
        "WirelessConnection": { "target": "com.amazonaws.snowball#WirelessConnection", "traits": {
-          "smithy.api#documentation": "Configures the wireless connection for the AWS Snowcone device."
+          "smithy.api#documentation": "Configures the wireless connection for the Snowcone device."
        } } },
        "traits": {
-          "smithy.api#documentation": "Specifies the device configuration for an AWS Snowcone job."
+          "smithy.api#documentation": "Specifies the device configuration for a Snowcone job."
        } },
      "com.amazonaws.snowball#SnsTopicARN": {
@@ -2774,24 +2786,44 @@
        } } },
+      "com.amazonaws.snowball#TGWOnDeviceServiceConfiguration": {
+        "type": "structure",
+        "members": {
+          "StorageLimit": {
+            "target": "com.amazonaws.snowball#StorageLimit",
+            "traits": {
+              "smithy.api#documentation": "The maximum number of virtual tapes to store on one Snow Family device. Due to physical\n resource limitations, this value must be set to 80 for Snowball Edge."
+            }
+          },
+          "StorageUnit": {
+            "target": "com.amazonaws.snowball#StorageUnit",
+            "traits": {
+              "smithy.api#documentation": "The scale unit of the virtual tapes on the device."
+            }
+          }
+        },
+        "traits": {
+          "smithy.api#documentation": "An object that represents the metadata and configuration settings for the Storage Gateway\n service Tape Gateway type on an Amazon Web Services Snow Family device."
+        }
+      },
      "com.amazonaws.snowball#TargetOnDeviceService": { "type": "structure", "members": {
        "ServiceName": { "target": "com.amazonaws.snowball#DeviceServiceName", "traits": {
-          "smithy.api#documentation": "Specifies the name of the service on the Snow Family device that your\n transferred data will be exported from or imported into."
+          "smithy.api#documentation": "Specifies the name of the service on the Snow Family device that your transferred data\n will be exported from or imported into."
        } },
        "TransferOption": { "target": "com.amazonaws.snowball#TransferOption", "traits": {
-          "smithy.api#documentation": "Specifies whether the data is being imported or exported. You can import or export the data, or use it locally on the device."
+          "smithy.api#documentation": "Specifies whether the data is being imported or exported. You can import or export the\n data, or use it locally on the device."
        } } },
        "traits": {
-          "smithy.api#documentation": "An object that represents the service or services on the Snow Family device that your\n transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System)."
+          "smithy.api#documentation": "An object that represents the service or services on the Snow Family device that your\n transferred data will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and\n NFS (Network File System)."
        } },
      "com.amazonaws.snowball#TargetOnDeviceServiceList": {
@@ -2808,7 +2840,7 @@
        } }, "traits": {
-          "smithy.api#documentation": "The tax documents required in your AWS Region."
+          "smithy.api#documentation": "The tax documents required in your Amazon Web Services Region."
        } },
      "com.amazonaws.snowball#Timestamp": {
@@ -2841,7 +2873,7 @@
        } }, "traits": {
-          "smithy.api#documentation": "The address is either outside the serviceable area for your region, or an error\n occurred. Check the address with your region's carrier and try again. If the issue persists,\n contact AWS Support.",
+          "smithy.api#documentation": "The address is either outside the serviceable area for your region, or an error\n occurred. Check the address with your region's carrier and try again. If the issue persists,\n contact Amazon Web Services Support.",
          "smithy.api#error": "client"
        } },
@@ -2887,7 +2919,7 @@
        "RoleARN": { "target": "com.amazonaws.snowball#RoleARN", "traits": {
-          "smithy.api#documentation": "The new role Amazon Resource Name (ARN) that you want to associate with this cluster.\n To create a role ARN, use the CreateRole API action in AWS\n Identity and Access Management (IAM)."
+          "smithy.api#documentation": "The new role Amazon Resource Name (ARN) that you want to associate with this cluster.\n To create a role ARN, use the CreateRole API action in Identity and Access Management (IAM)."
        } },
        "Description": {
@@ -2905,7 +2937,7 @@
        "OnDeviceServiceConfiguration": { "target": "com.amazonaws.snowball#OnDeviceServiceConfiguration", "traits": {
-          "smithy.api#documentation": "Specifies the service or services on the Snow Family device that your\n transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System)."
+          "smithy.api#documentation": "Specifies the service or services on the Snow Family device that your transferred data\n will be exported from or imported into. Amazon Web Services Snow Family device clusters support Amazon S3 and NFS\n (Network File System)."
        } },
        "AddressId": {
@@ -2983,7 +3015,7 @@
        "RoleARN": { "target": "com.amazonaws.snowball#RoleARN", "traits": {
-          "smithy.api#documentation": "The new role Amazon Resource Name (ARN) that you want to associate with this job. To\n create a role ARN, use the CreateRole AWS Identity and Access\n Management (IAM) API action."
+          "smithy.api#documentation": "The new role Amazon Resource Name (ARN) that you want to associate with this job. To\n create a role ARN, use the CreateRole Identity and Access Management\n (IAM) API action."
        } },
        "Notification": {
@@ -3001,7 +3033,7 @@
        "OnDeviceServiceConfiguration": { "target": "com.amazonaws.snowball#OnDeviceServiceConfiguration", "traits": {
-          "smithy.api#documentation": "Specifies the service or services on the Snow Family device that your\n transferred data will be exported from or imported into. AWS Snow Family supports Amazon S3 and NFS (Network File System)."
+          "smithy.api#documentation": "Specifies the service or services on the Snow Family device that your transferred data\n will be exported from or imported into. Amazon Web Services Snow Family supports Amazon S3 and NFS (Network File\n System) and the Amazon Web Services Storage Gateway service Tape Gateway type."
        } },
        "AddressId": {
@@ -3066,14 +3098,14 @@
        "JobId": { "target": "com.amazonaws.snowball#JobId", "traits": {
-          "smithy.api#documentation": "The job ID of the job whose shipment date you want to update, for example\n JID123e4567-e89b-12d3-a456-426655440000.",
+          "smithy.api#documentation": "The job ID of the job whose shipment date you want to update, for example\n JID123e4567-e89b-12d3-a456-426655440000.",
          "smithy.api#required": {}
        } },
        "ShipmentState": { "target": "com.amazonaws.snowball#ShipmentState", "traits": {
-          "smithy.api#documentation": "The state of a device when it is being shipped.\n Set to RECEIVED when the device arrives at your location.\n Set to RETURNED when you have returned the device to AWS.",
+          "smithy.api#documentation": "The state of a device when it is being shipped.\n Set to RECEIVED when the device arrives at your location.\n Set to RETURNED when you have returned the device to Amazon Web Services.",
          "smithy.api#required": {}
        } }
@@ -3113,7 +3145,7 @@
        "ReplacementJob": { "target": "com.amazonaws.snowball#JobId", "traits": {
-          "smithy.api#documentation": "Specifies that a device that is ordered with long-term pricing should be replaced with a new device."
+          "smithy.api#documentation": "Specifies that a device that is ordered with long-term pricing should be replaced with a\n new device."
        } },
        "IsLongTermPricingAutoRenew": {
@@ -3134,12 +3166,12 @@
        "IsWifiEnabled": { "target": "com.amazonaws.snowball#Boolean", "traits": {
-          "smithy.api#documentation": "Enables the Wi-Fi adapter on an AWS Snowcone device."
+          "smithy.api#documentation": "Enables the Wi-Fi adapter on a Snowcone device."
        } } },
        "traits": {
-          "smithy.api#documentation": "Configures the wireless connection on an AWS Snowcone device."
+          "smithy.api#documentation": "Configures the wireless connection on a Snowcone device."
        } } }
diff --git a/codegen/sdk-codegen/aws-models/ssm.json b/codegen/sdk-codegen/aws-models/ssm.json
index b6bea77306b5..87111bdcee3b 100644
--- a/codegen/sdk-codegen/aws-models/ssm.json
+++ b/codegen/sdk-codegen/aws-models/ssm.json
@@ -115,31 +115,31 @@
        "DefaultInstanceName": { "target": "com.amazonaws.ssm#DefaultInstanceName", "traits": {
-          "smithy.api#documentation": "A name for the managed instance when it is created."
+          "smithy.api#documentation": "A name for the managed node when it is created."
        } },
        "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": {
-          "smithy.api#documentation": "The Identity and Access Management (IAM) role to assign to the managed\n instance."
+          "smithy.api#documentation": "The Identity and Access Management (IAM) role to assign to the managed\n node."
        } },
        "RegistrationLimit": { "target": "com.amazonaws.ssm#RegistrationLimit", "traits": {
-          "smithy.api#documentation": "The maximum number of managed instances that can be registered using this activation."
+          "smithy.api#documentation": "The maximum number of managed nodes that can be registered using this activation."
        } },
        "RegistrationsCount": { "target": "com.amazonaws.ssm#RegistrationsCount", "traits": {
-          "smithy.api#documentation": "The number of managed instances already registered with this activation."
+          "smithy.api#documentation": "The number of managed nodes already registered with this activation."
        } },
        "ExpirationDate": { "target": "com.amazonaws.ssm#ExpirationDate", "traits": {
-          "smithy.api#documentation": "The date when this activation can no longer be used to register managed instances."
+          "smithy.api#documentation": "The date when this activation can no longer be used to register managed nodes."
        } },
        "Expired": {
@@ -162,7 +162,7 @@
        } }, "traits": {
-          "smithy.api#documentation": "An activation registers one or more on-premises servers or virtual machines (VMs) with Amazon Web Services\n so that you can configure those servers or VMs using Run Command. A server or VM that has been\n registered with Amazon Web Services Systems Manager is called a managed instance."
+          "smithy.api#documentation": "An activation registers one or more on-premises servers or virtual machines (VMs) with Amazon Web Services\n so that you can configure those servers or VMs using Run Command. A server or VM that has been\n registered with Amazon Web Services Systems Manager is called a managed node."
        } },
      "com.amazonaws.ssm#ActivationCode": {
@@ -221,7 +221,7 @@
        } ], "traits": {
-          "smithy.api#documentation": "Adds or overwrites one or more tags for the specified resource. Tags are metadata that you\n can assign to your documents, managed instances, maintenance windows, Parameter Store parameters,\n and patch baselines. Tags enable you to categorize your resources in different ways, for example,\n by purpose, owner, or environment. Each tag consists of a key and an optional value, both of\n which you define. For example, you could define a set of tags for your account's managed\n instances that helps you track each instance's owner and stack level. For example: • Key=Owner,Value=DbAdmin • Key=Owner,Value=SysAdmin • Key=Owner,Value=Dev • Key=Stack,Value=Production • Key=Stack,Value=Pre-Production • Key=Stack,Value=Test\n Each resource can have a maximum of 50 tags.\n We recommend that you devise a set of tag keys that meets your needs for each resource type.\n Using a consistent set of tag keys makes it easier for you to manage your resources. You can\n search and filter the resources based on the tags you add. Tags don't have any semantic meaning\n to and are interpreted strictly as a string of characters.\n For more information about using tags with Amazon Elastic Compute Cloud (Amazon EC2) instances, see Tagging your Amazon EC2\n resources in the Amazon EC2 User Guide."
+          "smithy.api#documentation": "Adds or overwrites one or more tags for the specified resource. Tags are metadata that you\n can assign to your documents, managed nodes, maintenance windows, Parameter Store parameters, and\n patch baselines. Tags enable you to categorize your resources in different ways, for example, by\n purpose, owner, or environment. Each tag consists of a key and an optional value, both of which\n you define. For example, you could define a set of tags for your account's managed nodes that\n helps you track each node's owner and stack level. For example: • Key=Owner,Value=DbAdmin • Key=Owner,Value=SysAdmin • Key=Owner,Value=Dev • Key=Stack,Value=Production • Key=Stack,Value=Pre-Production • Key=Stack,Value=Test\n Each resource can have a maximum of 50 tags.\n We recommend that you devise a set of tag keys that meets your needs for each resource type.\n Using a consistent set of tag keys makes it easier for you to manage your resources. You can\n search and filter the resources based on the tags you add. Tags don't have any semantic meaning\n to and are interpreted strictly as a string of characters.\n For more information about using tags with Amazon Elastic Compute Cloud (Amazon EC2) instances, see Tagging your Amazon EC2\n resources in the Amazon EC2 User Guide."
        } },
      "com.amazonaws.ssm#AddTagsToResourceRequest": { "type": "structure", "members": {
        "ResourceType": { "target": "com.amazonaws.ssm#ResourceTypeForTagging", "traits": {
-          "smithy.api#documentation": "Specifies the type of resource you are tagging.\n The ManagedInstance type for this API operation is for on-premises managed\n instances. You must specify the name of the managed instance in the following format:\n mi-ID_number\n . For example,\n mi-1a2b3c4d5e6f.",
+          "smithy.api#documentation": "Specifies the type of resource you are tagging.\n The ManagedInstance type for this API operation is for on-premises managed\n nodes. You must specify the name of the managed node in the following format:\n mi-ID_number\n . For example,\n mi-1a2b3c4d5e6f.",
          "smithy.api#required": {}
        } },
        "ResourceId": { "target": "com.amazonaws.ssm#ResourceId", "traits": {
-          "smithy.api#documentation": "The resource ID you want to tag.\n Use the ID of the resource. Here are some examples:\n MaintenanceWindow: mw-012345abcde\n PatchBaseline: pb-012345abcde\n OpsMetadata object: ResourceID for tagging is created from the\n Amazon Resource Name (ARN) for the object. Specifically, ResourceID is created from\n the strings that come after the word opsmetadata in the ARN. For example, an\n OpsMetadata object with an ARN of\n arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a\n ResourceID of either aws/ssm/MyGroup/appmanager or\n /aws/ssm/MyGroup/appmanager.\n For the Document and Parameter values, use the name of the\n resource.\n ManagedInstance: mi-012345abcde\n The ManagedInstance type for this API operation is only for on-premises\n managed instances. You must specify the name of the managed instance in the following format:\n mi-ID_number\n . For example,\n mi-1a2b3c4d5e6f.",
+          "smithy.api#documentation": "The resource ID you want to tag.\n Use the ID of the resource. Here are some examples:\n MaintenanceWindow: mw-012345abcde\n PatchBaseline: pb-012345abcde\n OpsMetadata object: ResourceID for tagging is created from the\n Amazon Resource Name (ARN) for the object. Specifically, ResourceID is created from\n the strings that come after the word opsmetadata in the ARN. For example, an\n OpsMetadata object with an ARN of\n arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager has a\n ResourceID of either aws/ssm/MyGroup/appmanager or\n /aws/ssm/MyGroup/appmanager.\n For the Document and Parameter values, use the name of the\n resource.\n ManagedInstance: mi-012345abcde\n The ManagedInstance type for this API operation is only for on-premises\n managed nodes. You must specify the name of the managed node in the following format:\n mi-ID_number\n . For example,\n mi-1a2b3c4d5e6f.",
          "smithy.api#required": {}
        } },
@@ -305,7 +305,7 @@
        "name": "ssm" }, "aws.protocols#awsJson1_1": {},
-      "smithy.api#documentation": "Amazon Web Services Systems Manager is a collection of capabilities that helps you automate management tasks such as\n collecting system inventory, applying operating system (OS) patches, automating the creation of\n Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale.\n Systems Manager lets you remotely and securely manage the configuration of your managed instances. A\n managed instance is any Amazon Elastic Compute Cloud instance (EC2 instance), or any\n on-premises server or virtual machine (VM) in your hybrid environment that has been configured\n for Systems Manager.\n This reference is intended to be used with the Amazon Web Services Systems Manager User Guide.\n To get started, verify prerequisites and configure managed instances. For more information,\n see Setting up\n Amazon Web Services Systems Manager in the Amazon Web Services Systems Manager User Guide.\n Related resources\n ",
+      "smithy.api#documentation": "Amazon Web Services Systems Manager is a collection of capabilities that helps you automate management tasks such as\n collecting system inventory, applying operating system (OS) patches, automating the creation of\n Amazon Machine Images (AMIs), and configuring operating systems (OSs) and applications at scale.\n Systems Manager lets you remotely and securely manage the configuration of your managed nodes. A\n managed node is any Amazon Elastic Compute Cloud (Amazon EC2) instance, edge\n device, or on-premises server or virtual machine (VM) that has been configured for\n Systems Manager.\n With support for IoT Greengrass Version 2 devices, the phrase managed\n instance has been changed to managed node in most of the Systems Manager\n documentation. The Systems Manager console, API\n calls, error messages, and SSM documents still use the term instance.\n This reference is intended to be used with the Amazon Web Services Systems Manager User Guide.\n To get started, verify prerequisites and configure managed nodes. For more information, see\n Setting up\n Amazon Web Services Systems Manager in the Amazon Web Services Systems Manager User Guide.\n Related resources\n ",
      "smithy.api#title": "Amazon Simple Systems Manager (SSM)",
      "smithy.api#xmlNamespace": { "uri": "http://ssm.amazonaws.com/doc/2014-11-06/"
@@ -813,7 +813,7 @@
        "code": "AssociatedInstances", "httpResponseCode": 400 },
-          "smithy.api#documentation": "You must disassociate a document from all instances before you can delete it.",
+          "smithy.api#documentation": "You must disassociate a document from all managed nodes before you can delete it.",
          "smithy.api#error": "client"
        } },
@@ -829,7 +829,7 @@
        "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-          "smithy.api#documentation": "The instance ID."
+          "smithy.api#documentation": "The managed node ID."
        } },
        "AssociationId": {
@@ -853,7 +853,7 @@
        "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": {
-          "smithy.api#documentation": "The instances targeted by the request to create an association. You can target all instances\n in an Amazon Web Services account by specifying the InstanceIds key with a value of\n *."
+          "smithy.api#documentation": "The managed nodes targeted by the request to create an association. You can target all\n managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of\n *."
        } },
        "LastExecutionDate": {
@@ -882,7 +882,7 @@
        } }, "traits": {
-          "smithy.api#documentation": "Describes an association of a Amazon Web Services Systems Manager document (SSM document) and an instance."
+          "smithy.api#documentation": "Describes an association of a Amazon Web Services Systems Manager document (SSM document) and a managed node."
        } },
      "com.amazonaws.ssm#AssociationAlreadyExists": {
@@ -936,7 +936,7 @@
        "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-          "smithy.api#documentation": "The instance ID."
+          "smithy.api#documentation": "The managed node ID."
        } },
        "AssociationVersion": {
@@ -996,7 +996,7 @@
        "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": {
-          "smithy.api#documentation": "The instances targeted by the request."
+          "smithy.api#documentation": "The managed nodes targeted by the request."
        } },
        "ScheduleExpression": {
@@ -1032,13 +1032,13 @@
        "MaxErrors": { "target": "com.amazonaws.ssm#MaxErrors", "traits": {
@@ -1032,13 +1032,13 @@
       "MaxErrors": {
         "target": "com.amazonaws.ssm#MaxErrors",
         "traits": {
-          "smithy.api#documentation": "<p>The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.</p> <p>Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.</p>"
+          "smithy.api#documentation": "<p>The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 managed nodes and set MaxError to 10%, then the system stops sending the request when the sixth error is received.</p> <p>Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.</p>"
         }
       },
       "MaxConcurrency": {
         "target": "com.amazonaws.ssm#MaxConcurrency",
         "traits": {
-          "smithy.api#documentation": "<p>The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.</p> <p>If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.</p>"
+          "smithy.api#documentation": "<p>The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.</p> <p>If a new managed node starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new managed node will process its association within the limit specified for MaxConcurrency.</p>"
         }
       },
       "ComplianceSeverity": {

@@ -1273,13 +1273,13 @@
       "ResourceId": {
         "target": "com.amazonaws.ssm#AssociationResourceId",
         "traits": {
-          "smithy.api#documentation": "<p>The resource ID, for example, the instance ID where the association ran.</p>"
+          "smithy.api#documentation": "<p>The resource ID, for example, the managed node ID where the association ran.</p>"
         }
       },
       "ResourceType": {
         "target": "com.amazonaws.ssm#AssociationResourceType",
         "traits": {
-          "smithy.api#documentation": "<p>The resource type, for example, instance.</p>"
+          "smithy.api#documentation": "<p>The resource type, for example, EC2.</p>"
         }
       },
       "Status": {
@@ -1557,7 +1557,7 @@
       "AssociationStatusAggregatedCount": {
         "target": "com.amazonaws.ssm#AssociationStatusAggregatedCount",
         "traits": {
-          "smithy.api#documentation": "<p>Returns the number of targets for the association status. For example, if you created an association with two instances, and one of them was successful, this would return the count of instances by status.</p>"
+          "smithy.api#documentation": "<p>Returns the number of targets for the association status. For example, if you created an association with two managed nodes, and one of them was successful, this would return the count of managed nodes by status.</p>"
         }
       }
     },
@@ -1733,13 +1733,13 @@
       "MaxErrors": {
         "target": "com.amazonaws.ssm#MaxErrors",
         "traits": {
-          "smithy.api#documentation": "<p>The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received.</p> <p>Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.</p>"
+          "smithy.api#documentation": "<p>The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 managed nodes and set MaxError to 10%, then the system stops sending the request when the sixth error is received.</p> <p>Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time.</p>"
         }
       },
       "MaxConcurrency": {
         "target": "com.amazonaws.ssm#MaxConcurrency",
         "traits": {
-          "smithy.api#documentation": "<p>The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.</p> <p>If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency.</p>"
+          "smithy.api#documentation": "<p>The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time.</p> <p>If a new managed node starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new managed node will process its association within the limit specified for MaxConcurrency.</p>"
         }
       },
       "ComplianceSeverity": {

@@ -2832,13 +2832,13 @@
       "ApprovedPatchesEnableNonSecurity": {
         "target": "com.amazonaws.ssm#Boolean",
         "traits": {
-          "smithy.api#documentation": "<p>Indicates whether the list of approved patches includes non-security updates that should be applied to the instances. The default value is false. Applies to Linux instances only.</p>"
+          "smithy.api#documentation": "<p>Indicates whether the list of approved patches includes non-security updates that should be applied to the managed nodes. The default value is false. Applies to Linux managed nodes only.</p>"
         }
       },
       "Sources": {
         "target": "com.amazonaws.ssm#PatchSourceList",
         "traits": {
-          "smithy.api#documentation": "<p>Information about the patches to use to update the instances, including target operating systems and source repositories. Applies to Linux instances only.</p>"
+          "smithy.api#documentation": "<p>Information about the patches to use to update the managed nodes, including target operating systems and source repositories. Applies to Linux managed nodes only.</p>"
         }
       }
     },

@@ -2915,7 +2915,7 @@
       "InstanceIds": {
         "target": "com.amazonaws.ssm#InstanceIdList",
         "traits": {
-          "smithy.api#documentation": "<p>(Optional) A list of instance IDs on which you want to cancel the command. If not provided, the command is canceled on every instance on which it was requested.</p>"
+          "smithy.api#documentation": "<p>(Optional) A list of managed node IDs on which you want to cancel the command. If not provided, the command is canceled on every node on which it was requested.</p>"
         }
       }
     },

@@ -3074,13 +3074,13 @@
       "InstanceIds": {
         "target": "com.amazonaws.ssm#InstanceIdList",
         "traits": {
-          "smithy.api#documentation": "<p>The instance IDs against which this command was requested.</p>"
+          "smithy.api#documentation": "<p>The managed node IDs against which this command was requested.</p>"
         }
       },
       "Targets": {
         "target": "com.amazonaws.ssm#Targets",
         "traits": {
-          "smithy.api#documentation": "<p>An array of search criteria that targets instances using a Key,Value combination that you specify. Targets is required if you don't provide one or more instance IDs in the call.</p>"
+          "smithy.api#documentation": "<p>An array of search criteria that targets managed nodes using a Key,Value combination that you specify. Targets is required if you don't provide one or more managed node IDs in the call.</p>"
         }
       },
       "RequestedDateTime": {

@@ -3098,7 +3098,7 @@
       "StatusDetails": {
         "target": "com.amazonaws.ssm#StatusDetails",
         "traits": {
-          "smithy.api#documentation": "<p>A detailed status of the command execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values:</p> <ul> <li> <p>Pending: The command hasn't been sent to any instances.</p> </li> <li> <p>In Progress: The command has been sent to at least one instance but hasn't reached a final state on all instances.</p> </li> <li> <p>Success: The command successfully ran on all invocations. This is a terminal state.</p> </li> <li> <p>Delivery Timed Out: The value of MaxErrors or more command invocations shows a status of Delivery Timed Out. This is a terminal state.</p> </li> <li> <p>Execution Timed Out: The value of MaxErrors or more command invocations shows a status of Execution Timed Out. This is a terminal state.</p> </li> <li> <p>Failed: The value of MaxErrors or more command invocations shows a status of Failed. This is a terminal state.</p> </li> <li> <p>Incomplete: The command was attempted on all instances and one or more invocations doesn't have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.</p> </li> <li> <p>Canceled: The command was terminated before it was completed. This is a terminal state.</p> </li> <li> <p>Rate Exceeded: The number of instances targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before running it on any instance. This is a terminal state.</p> </li> </ul>"
+          "smithy.api#documentation": "<p>A detailed status of the command execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values:</p> <ul> <li> <p>Pending: The command hasn't been sent to any managed nodes.</p> </li> <li> <p>In Progress: The command has been sent to at least one managed node but hasn't reached a final state on all managed nodes.</p> </li> <li> <p>Success: The command successfully ran on all invocations. This is a terminal state.</p> </li> <li> <p>Delivery Timed Out: The value of MaxErrors or more command invocations shows a status of Delivery Timed Out. This is a terminal state.</p> </li> <li> <p>Execution Timed Out: The value of MaxErrors or more command invocations shows a status of Execution Timed Out. This is a terminal state.</p> </li> <li> <p>Failed: The value of MaxErrors or more command invocations shows a status of Failed. This is a terminal state.</p> </li> <li> <p>Incomplete: The command was attempted on all managed nodes and one or more invocations doesn't have a value of Success but not enough invocations failed for the status to be Failed. This is a terminal state.</p> </li> <li> <p>Canceled: The command was terminated before it was completed. This is a terminal state.</p> </li> <li> <p>Rate Exceeded: The number of managed nodes targeted by the command exceeded the account limit for pending invocations. The system has canceled the command before running it on any managed node. This is a terminal state.</p> </li> </ul>"
         }
       },
       "OutputS3Region": {

@@ -3122,7 +3122,7 @@
       "MaxConcurrency": {
         "target": "com.amazonaws.ssm#MaxConcurrency",
         "traits": {
-          "smithy.api#documentation": "<p>The maximum number of instances that are allowed to run the command at the same time. You can specify a number of instances, such as 10, or a percentage of instances, such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Running commands using Systems Manager Run Command in the Amazon Web Services Systems Manager User Guide.</p>"
+          "smithy.api#documentation": "<p>The maximum number of managed nodes that are allowed to run the command at the same time. You can specify a number of managed nodes, such as 10, or a percentage of nodes, such as 10%. The default value is 50. For more information about how to use MaxConcurrency, see Running commands using Systems Manager Run Command in the Amazon Web Services Systems Manager User Guide.</p>"
         }
       },
       "MaxErrors": {

@@ -3197,13 +3197,13 @@
       "value": {
         "target": "com.amazonaws.ssm#CommandFilterValue",
         "traits": {
-          "smithy.api#documentation": "<p>The filter value. Valid values for each filter key are as follows:</p> <ul> <li> <p>InvokedAfter: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions occurring July 7, 2021, and later.</p> </li> <li> <p>InvokedBefore: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions from before July 7, 2021.</p> </li> <li> <p>Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call.</p> <p>The status values you can specify for ListCommands are:</p> <ul> <li> <p>Pending</p> </li> <li> <p>InProgress</p> </li> <li> <p>Success</p> </li> <li> <p>Cancelled</p> </li> <li> <p>Failed</p> </li> <li> <p>TimedOut (this includes both Delivery and Execution time outs)</p> </li> <li> <p>AccessDenied</p> </li> <li> <p>DeliveryTimedOut</p> </li> <li> <p>ExecutionTimedOut</p> </li> <li> <p>Incomplete</p> </li> <li> <p>NoInstancesInTag</p> </li> <li> <p>LimitExceeded</p> </li> </ul> <p>The status values you can specify for ListCommandInvocations are:</p> <ul> <li> <p>Pending</p> </li> <li> <p>InProgress</p> </li> <li> <p>Delayed</p> </li> <li> <p>Success</p> </li> <li> <p>Cancelled</p> </li> <li> <p>Failed</p> </li> <li> <p>TimedOut (this includes both Delivery and Execution time outs)</p> </li> <li> <p>AccessDenied</p> </li> <li> <p>DeliveryTimedOut</p> </li> <li> <p>ExecutionTimedOut</p> </li> <li> <p>Undeliverable</p> </li> <li> <p>InvalidPlatform</p> </li> <li> <p>Terminated</p> </li> </ul> </li> <li> <p>DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on instances.</p> </li> <li> <p>ExecutionStage: Specify one of the following values (ListCommands operations only):</p> <ul> <li> <p>Executing: Returns a list of command executions that are currently still running.</p> </li> <li> <p>Complete: Returns a list of command executions that have already completed.</p> </li> </ul> </li> </ul>",
+          "smithy.api#documentation": "<p>The filter value. Valid values for each filter key are as follows:</p> <ul> <li> <p>InvokedAfter: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions occurring July 7, 2021, and later.</p> </li> <li> <p>InvokedBefore: Specify a timestamp to limit your results. For example, specify 2021-07-07T00:00:00Z to see a list of command executions from before July 7, 2021.</p> </li> <li> <p>Status: Specify a valid command status to see a list of all command executions with that status. The status choices depend on the API you call.</p> <p>The status values you can specify for ListCommands are:</p> <ul> <li> <p>Pending</p> </li> <li> <p>InProgress</p> </li> <li> <p>Success</p> </li> <li> <p>Cancelled</p> </li> <li> <p>Failed</p> </li> <li> <p>TimedOut (this includes both Delivery and Execution time outs)</p> </li> <li> <p>AccessDenied</p> </li> <li> <p>DeliveryTimedOut</p> </li> <li> <p>ExecutionTimedOut</p> </li> <li> <p>Incomplete</p> </li> <li> <p>NoInstancesInTag</p> </li> <li> <p>LimitExceeded</p> </li> </ul> <p>The status values you can specify for ListCommandInvocations are:</p> <ul> <li> <p>Pending</p> </li> <li> <p>InProgress</p> </li> <li> <p>Delayed</p> </li> <li> <p>Success</p> </li> <li> <p>Cancelled</p> </li> <li> <p>Failed</p> </li> <li> <p>TimedOut (this includes both Delivery and Execution time outs)</p> </li> <li> <p>AccessDenied</p> </li> <li> <p>DeliveryTimedOut</p> </li> <li> <p>ExecutionTimedOut</p> </li> <li> <p>Undeliverable</p> </li> <li> <p>InvalidPlatform</p> </li> <li> <p>Terminated</p> </li> </ul> </li> <li> <p>DocumentName: Specify name of the Amazon Web Services Systems Manager document (SSM document) for which you want to see command execution results. For example, specify AWS-RunPatchBaseline to see command executions that used this SSM document to perform security patching operations on managed nodes.</p> </li> <li> <p>ExecutionStage: Specify one of the following values (ListCommands operations only):</p> <ul> <li> <p>Executing: Returns a list of command executions that are currently still running.</p> </li> <li> <p>Complete: Returns a list of command executions that have already completed.</p> </li> </ul> </li> </ul>",
           "smithy.api#required": {}
         }
       }
     },
     "traits": {
-      "smithy.api#documentation": "<p>Describes a command filter.</p> <p>An instance ID can't be specified when a command status is Pending because the command hasn't run on the instance yet.</p>"
+      "smithy.api#documentation": "<p>Describes a command filter.</p> <p>A managed node ID can't be specified when a command status is Pending because the command hasn't run on the node yet.</p>"
     }
   },
   "com.amazonaws.ssm#CommandFilterKey": {

@@ -3275,13 +3275,13 @@
       "InstanceId": {
         "target": "com.amazonaws.ssm#InstanceId",
         "traits": {
-          "smithy.api#documentation": "<p>The instance ID in which this invocation was requested.</p>"
+          "smithy.api#documentation": "<p>The managed node ID in which this invocation was requested.</p>"
         }
       },
       "InstanceName": {
         "target": "com.amazonaws.ssm#InstanceTagName",
         "traits": {
-          "smithy.api#documentation": "<p>The fully qualified host name of the managed instance.</p>"
+          "smithy.api#documentation": "<p>The fully qualified host name of the managed node.</p>"
         }
       },
       "Comment": {
@@ -3305,7 +3305,7 @@
       "RequestedDateTime": {
         "target": "com.amazonaws.ssm#DateTime",
         "traits": {
-          "smithy.api#documentation": "<p>The time and date the request was sent to this instance.</p>"
+          "smithy.api#documentation": "<p>The time and date the request was sent to this managed node.</p>"
         }
       },
       "Status": {

@@ -3317,7 +3317,7 @@
       "StatusDetails": {
         "target": "com.amazonaws.ssm#StatusDetails",
         "traits": {
-          "smithy.api#documentation": "<p>A detailed status of the command execution for each invocation (each instance targeted by the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values:</p> <ul> <li> <p>Pending: The command hasn't been sent to the instance.</p> </li> <li> <p>In Progress: The command has been sent to the instance but hasn't reached a terminal state.</p> </li> <li> <p>Success: The execution of the command or plugin was successfully completed. This is a terminal state.</p> </li> <li> <p>Delivery Timed Out: The command wasn't delivered to the instance before the delivery timeout expired. Delivery timeouts don't count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state.</p> </li> <li> <p>Execution Timed Out: Command execution started on the instance, but the execution wasn't complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state.</p> </li> <li> <p>Failed: The command wasn't successful on the instance. For a plugin, this indicates that the result code wasn't zero. For a command invocation, this indicates that the result code for one or more plugins wasn't zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state.</p> </li> <li> <p>Canceled: The command was terminated before it was completed. This is a terminal state.</p> </li> <li> <p>Undeliverable: The command can't be delivered to the instance. The instance might not exist or might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit and don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state.</p> </li> <li> <p>Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state.</p> </li> </ul>"
+          "smithy.api#documentation": "<p>A detailed status of the command execution for each invocation (each managed node targeted by the command). StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Pending: The command hasn't been sent to the managed node.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In Progress: The command has been sent to the managed node but hasn't reached a terminal\n state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Success: The execution of the command or plugin was successfully completed. This is a\n terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Delivery Timed Out: The command wasn't delivered to the managed node before the delivery\n timeout expired. Delivery timeouts don't count against the parent command's\n MaxErrors limit, but they do contribute to whether the parent command status is\n Success or Incomplete. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Execution Timed Out: Command execution started on the managed node, but the execution wasn't\n complete before the execution timeout expired. Execution timeouts count against the\n MaxErrors limit of the parent command. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Failed: The command wasn't successful on the managed node. For a plugin, this indicates that\n the result code wasn't zero. For a command invocation, this indicates that the result code for\n one or more plugins wasn't zero. Invocation failures count against the MaxErrors\n limit of the parent command. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Canceled: The command was terminated before it was completed. This is a terminal\n state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Undeliverable: The command can't be delivered to the managed node. The managed node might not\n exist or might not be responding. Undeliverable invocations don't count against the parent\n command's MaxErrors limit and don't contribute to whether the parent command status is Success\n or Incomplete. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Terminated: The parent command exceeded its MaxErrors limit and subsequent command\n invocations were canceled by the system. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      " } }, "TraceOutput": { @@ -3347,13 +3347,13 @@ "ServiceRole": { "target": "com.amazonaws.ssm#ServiceRole", "traits": { - "smithy.api#documentation": "

                                                                      The Identity and Access Management (IAM) service role that Run Command, a capability\n of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes\n on a per instance basis.

                                                                      " + "smithy.api#documentation": "

                                                                      The Identity and Access Management (IAM) service role that Run Command, a capability\n of Amazon Web Services Systems Manager, uses to act on your behalf when sending notifications about command status changes\n on a per managed node basis.

                                                                      " } }, "NotificationConfig": { "target": "com.amazonaws.ssm#NotificationConfig", "traits": { - "smithy.api#documentation": "

                                                                      Configurations for sending notifications about command status changes on a per instance\n basis.

                                                                      " + "smithy.api#documentation": "

                                                                      Configurations for sending notifications about command status changes on a per managed node\n basis.

                                                                      " } }, "CloudWatchOutputConfig": { @@ -3364,7 +3364,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      An invocation is copy of a command sent to a specific instance. A command can apply to one\n or more instances. A command invocation applies to one instance. For example, if a user runs\n SendCommand against three instances, then a command invocation is created for each requested\n instance ID. A command invocation returns status and detail information about a command you ran.\n

                                                                      " + "smithy.api#documentation": "

                                                                      An invocation is a copy of a command sent to a specific managed node. A command can apply to one\n or more managed nodes. A command invocation applies to one managed node. For example, if a user runs\n SendCommand against three managed nodes, then a command invocation is created for\n each requested managed node ID. A command invocation returns status and detail information about a\n command you ran.

                                                                      " } }, "com.amazonaws.ssm#CommandInvocationList": { @@ -3445,7 +3445,7 @@ "StatusDetails": { "target": "com.amazonaws.ssm#StatusDetails", "traits": { - "smithy.api#documentation": "

@@ -3445,7 +3445,7 @@
      "StatusDetails": {
        "target": "com.amazonaws.ssm#StatusDetails",
        "traits": {
-          "smithy.api#documentation": "A detailed status of the plugin execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values: • Pending: The command hasn't been sent to the instance. • In Progress: The command has been sent to the instance but hasn't reached a terminal state. • Success: The execution of the command or plugin was successfully completed. This is a terminal state. • Delivery Timed Out: The command wasn't delivered to the instance before the delivery timeout expired. Delivery timeouts don't count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state. • Execution Timed Out: Command execution started on the instance, but the execution wasn't complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state. • Failed: The command wasn't successful on the instance. For a plugin, this indicates that the result code wasn't zero. For a command invocation, this indicates that the result code for one or more plugins wasn't zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state. • Canceled: The command was terminated before it was completed. This is a terminal state. • Undeliverable: The command can't be delivered to the instance. The instance might not exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state. • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state."
+          "smithy.api#documentation": "A detailed status of the plugin execution. StatusDetails includes more information than Status because it includes states resulting from error and concurrency control parameters. StatusDetails can show different results than Status. For more information about these statuses, see Understanding command statuses in the Amazon Web Services Systems Manager User Guide. StatusDetails can be one of the following values: • Pending: The command hasn't been sent to the managed node. • In Progress: The command has been sent to the managed node but hasn't reached a terminal state. • Success: The execution of the command or plugin was successfully completed. This is a terminal state. • Delivery Timed Out: The command wasn't delivered to the managed node before the delivery timeout expired. Delivery timeouts don't count against the parent command's MaxErrors limit, but they do contribute to whether the parent command status is Success or Incomplete. This is a terminal state. • Execution Timed Out: Command execution started on the managed node, but the execution wasn't complete before the execution timeout expired. Execution timeouts count against the MaxErrors limit of the parent command. This is a terminal state. • Failed: The command wasn't successful on the managed node. For a plugin, this indicates that the result code wasn't zero. For a command invocation, this indicates that the result code for one or more plugins wasn't zero. Invocation failures count against the MaxErrors limit of the parent command. This is a terminal state. • Canceled: The command was terminated before it was completed. This is a terminal state. • Undeliverable: The command can't be delivered to the managed node. The managed node might not exist, or it might not be responding. Undeliverable invocations don't count against the parent command's MaxErrors limit, and they don't contribute to whether the parent command status is Success or Incomplete. This is a terminal state. • Terminated: The parent command exceeded its MaxErrors limit and subsequent command invocations were canceled by the system. This is a terminal state."
        }
      },
      "ResponseCode": {

@@ -3493,13 +3493,13 @@
      "OutputS3BucketName": {
        "target": "com.amazonaws.ssm#S3BucketName",
        "traits": {
-          "smithy.api#documentation": "The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response: doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript doc-example-bucket is the name of the S3 bucket; ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; i-02573cafcfEXAMPLE is the instance ID; awsrunShellScript is the name of the plugin."
+          "smithy.api#documentation": "The S3 bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response: doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript doc-example-bucket is the name of the S3 bucket; ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; i-02573cafcfEXAMPLE is the managed node ID; awsrunShellScript is the name of the plugin."
        }
      },
      "OutputS3KeyPrefix": {
        "target": "com.amazonaws.ssm#S3KeyPrefix",
        "traits": {
-          "smithy.api#documentation": "The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response: doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript doc-example-bucket is the name of the S3 bucket; ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; i-02573cafcfEXAMPLE is the instance ID; awsrunShellScript is the name of the plugin."
+          "smithy.api#documentation": "The S3 directory path inside the bucket where the responses to the command executions should be stored. This was requested when issuing the command. For example, in the following response: doc-example-bucket/ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix/i-02573cafcfEXAMPLE/awsrunShellScript doc-example-bucket is the name of the S3 bucket; ab19cb99-a030-46dd-9dfc-8eSAMPLEPre-Fix is the name of the S3 prefix; i-02573cafcfEXAMPLE is the managed node ID; awsrunShellScript is the name of the plugin."
        }
      }
    },

@@ -3674,7 +3674,7 @@
      "ResourceId": {
        "target": "com.amazonaws.ssm#ComplianceResourceId",
        "traits": {
-          "smithy.api#documentation": "An ID for the resource. For a managed instance, this is the instance ID."
+          "smithy.api#documentation": "An ID for the resource. For a managed node, this is the node ID."
        }
      },
      "Id": {
@@ -4124,7 +4124,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Generates an activation code and activation ID you can use to register your on-premises server or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises instances and VMs using Systems Manager, see Setting up Amazon Web Services Systems Manager for hybrid environments in the Amazon Web Services Systems Manager User Guide. On-premises servers or VMs that are registered with Systems Manager and Amazon Elastic Compute Cloud (Amazon EC2) instances that you manage with Systems Manager are all called managed instances."
+      "smithy.api#documentation": "Generates an activation code and activation ID you can use to register your on-premises servers, edge devices, or virtual machine (VM) with Amazon Web Services Systems Manager. Registering these machines with Systems Manager makes it possible to manage them using Systems Manager capabilities. You use the activation code and ID when installing SSM Agent on machines in your hybrid environment. For more information about requirements for managing on-premises machines using Systems Manager, see Setting up Amazon Web Services Systems Manager for hybrid environments in the Amazon Web Services Systems Manager User Guide. Amazon Elastic Compute Cloud (Amazon EC2) instances, edge devices, and on-premises servers and VMs that are configured for Systems Manager are all called managed nodes."
    }
  },
  "com.amazonaws.ssm#CreateActivationRequest": {
@@ -4139,13 +4139,13 @@
      "DefaultInstanceName": {
        "target": "com.amazonaws.ssm#DefaultInstanceName",
        "traits": {
-          "smithy.api#documentation": "The name of the registered, managed instance as it will appear in the Amazon Web Services Systems Manager console or when you use the Amazon Web Services command line tools to list Systems Manager resources. Don't enter personally identifiable information in this field."
+          "smithy.api#documentation": "The name of the registered, managed node as it will appear in the Amazon Web Services Systems Manager console or when you use the Amazon Web Services command line tools to list Systems Manager resources. Don't enter personally identifiable information in this field."
        }
      },
      "IamRole": {
        "target": "com.amazonaws.ssm#IamRole",
        "traits": {
-          "smithy.api#documentation": "The name of the Identity and Access Management (IAM) role that you want to assign to the managed instance. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid environment in the Amazon Web Services Systems Manager User Guide.",
+          "smithy.api#documentation": "The name of the Identity and Access Management (IAM) role that you want to assign to the managed node. This IAM role must provide AssumeRole permissions for the Amazon Web Services Systems Manager service principal ssm.amazonaws.com. For more information, see Create an IAM service role for a hybrid environment in the Amazon Web Services Systems Manager User Guide.",
          "smithy.api#required": {}
        }
      },
@@ -4153,7 +4153,7 @@
        "target": "com.amazonaws.ssm#RegistrationLimit",
        "traits": {
          "smithy.api#box": {},
-          "smithy.api#documentation": "Specify the maximum number of managed instances you want to register. The default value is 1."
+          "smithy.api#documentation": "Specify the maximum number of managed nodes you want to register. The default value is 1."
        }
      },
      "ExpirationDate": {
@@ -4165,7 +4165,7 @@
      "Tags": {
        "target": "com.amazonaws.ssm#TagList",
        "traits": {
-          "smithy.api#documentation": "Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an activation to identify which servers or virtual machines (VMs) in your on-premises environment you intend to activate. In this case, you could specify the following key-value pairs: • Key=OS,Value=Windows • Key=Environment,Value=Production When you install SSM Agent on your on-premises servers and VMs, you specify an activation ID and code. When you specify the activation ID and code, tags assigned to the activation are automatically applied to the on-premises servers or VMs. You can't add tags to or delete tags from an existing activation. You can tag your on-premises servers and VMs after they connect to Systems Manager for the first time and are assigned a managed instance ID. This means they are listed in the Amazon Web Services Systems Manager console with an ID that is prefixed with \"mi-\". For information about how to add tags to your managed instances, see AddTagsToResource. For information about how to remove tags from your managed instances, see RemoveTagsFromResource."
+          "smithy.api#documentation": "Optional metadata that you assign to a resource. Tags enable you to categorize a resource in different ways, such as by purpose, owner, or environment. For example, you might want to tag an activation to identify which servers or virtual machines (VMs) in your on-premises environment you intend to activate. In this case, you could specify the following key-value pairs: • Key=OS,Value=Windows • Key=Environment,Value=Production When you install SSM Agent on your on-premises servers and VMs, you specify an activation ID and code. When you specify the activation ID and code, tags assigned to the activation are automatically applied to the on-premises servers or VMs. You can't add tags to or delete tags from an existing activation. You can tag your on-premises servers, edge devices, and VMs after they connect to Systems Manager for the first time and are assigned a managed node ID. This means they are listed in the Amazon Web Services Systems Manager console with an ID that is prefixed with \"mi-\". For information about how to add tags to your managed nodes, see AddTagsToResource. For information about how to remove tags from your managed nodes, see RemoveTagsFromResource."
        }
      },
      "RegistrationMetadata": {

@@ -4237,7 +4237,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "A State Manager association defines the state that you want to maintain on your instances. For example, an association can specify that anti-virus software must be installed and running on your instances, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager applies the configuration when new instances are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service."
+      "smithy.api#documentation": "A State Manager association defines the state that you want to maintain on your managed nodes. For example, an association can specify that anti-virus software must be installed and running on your managed nodes, or that certain ports must be closed. For static targets, the association specifies a schedule for when the configuration is reapplied. For dynamic targets, such as an Amazon Web Services resource group or an Amazon Web Services autoscaling group, State Manager, a capability of Amazon Web Services Systems Manager applies the configuration when new managed nodes are added to the group. The association also specifies actions to take when applying the configuration. For example, an association for anti-virus software might run once a day. If the software isn't installed, then State Manager installs it. If the software is installed, but the service isn't running, then the association might instruct State Manager to start the service."
    }
  },
  "com.amazonaws.ssm#CreateAssociationBatch": {

@@ -4284,7 +4284,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Associates the specified Amazon Web Services Systems Manager document (SSM document) with the specified instances or targets. When you associate a document with one or more instances using instance IDs or tags, Amazon Web Services Systems Manager Agent (SSM Agent) running on the instance processes the document and configures the instance as specified. If you associate a document with an instance that already has an associated document, the system returns the AssociationAlreadyExists exception."
+      "smithy.api#documentation": "Associates the specified Amazon Web Services Systems Manager document (SSM document) with the specified managed nodes or targets. When you associate a document with one or more managed nodes using IDs or tags, Amazon Web Services Systems Manager Agent (SSM Agent) running on the managed node processes the document and configures the node as specified. If you associate a document with a managed node that already has an associated document, the system returns the AssociationAlreadyExists exception."
    }
  },
  "com.amazonaws.ssm#CreateAssociationBatchRequest": {
@@ -4319,14 +4319,14 @@
      "Name": {
        "target": "com.amazonaws.ssm#DocumentARN",
        "traits": {
-          "smithy.api#documentation": "The name of the SSM document that contains the configuration information for the instance. You can specify Command or Automation runbooks. You can specify Amazon Web Services-predefined documents, documents you created, or a document that is shared with you from another account. For SSM documents that are shared with you from other Amazon Web Services accounts, you must specify the complete SSM document ARN, in the following format: arn:aws:ssm:region:account-id:document/document-name For example: arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.",
+          "smithy.api#documentation": "The name of the SSM document that contains the configuration information for the managed node. You can specify Command or Automation runbooks. You can specify Amazon Web Services-predefined documents, documents you created, or a document that is shared with you from another account. For SSM documents that are shared with you from other Amazon Web Services accounts, you must specify the complete SSM document ARN, in the following format: arn:aws:ssm:region:account-id:document/document-name For example: arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.",
          "smithy.api#required": {}
        }
      },
      "InstanceId": {
        "target": "com.amazonaws.ssm#InstanceId",
        "traits": {
-          "smithy.api#documentation": "The instance ID. InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. Requests that include the parameter InstanceID with Systems Manager documents (SSM documents) that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you can't use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter."
+          "smithy.api#documentation": "The managed node ID. InstanceId has been deprecated. To specify a managed node ID for an association, use the Targets parameter. Requests that include the parameter InstanceID with Systems Manager documents (SSM documents) that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you can't use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter."
        }
      },
      "Parameters": {
@@ -4350,7 +4350,7 @@
      "Targets": {
        "target": "com.amazonaws.ssm#Targets",
        "traits": {
-          "smithy.api#documentation": "The instances targeted by the request."
+          "smithy.api#documentation": "The managed nodes targeted by the request."
        }
      },
      "ScheduleExpression": {
@@ -4374,13 +4374,13 @@
      "MaxErrors": {
        "target": "com.amazonaws.ssm#MaxErrors",
        "traits": {
-          "smithy.api#documentation": "The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 instances and set MaxError to 10%, then the system stops sending the request when the sixth error is received. Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time."
+          "smithy.api#documentation": "The number of errors that are allowed before the system stops sending requests to run the association on additional targets. You can specify either an absolute number of errors, for example 10, or a percentage of the target set, for example 10%. If you specify 3, for example, the system stops sending requests when the fourth error is received. If you specify 0, then the system stops sending requests after the first error is returned. If you run an association on 50 managed nodes and set MaxError to 10%, then the system stops sending the request when the sixth error is received. Executions that are already running an association when MaxErrors is reached are allowed to complete, but some of these executions may fail as well. If you need to ensure that there won't be more than max-errors failed executions, set MaxConcurrency to 1 so that executions proceed one at a time."
        }
      },
      "MaxConcurrency": {
        "target": "com.amazonaws.ssm#MaxConcurrency",
        "traits": {
-          "smithy.api#documentation": "The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time. If a new instance starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new instance will process its association within the limit specified for MaxConcurrency."
+          "smithy.api#documentation": "The maximum number of targets allowed to run the association at the same time. You can specify a number, for example 10, or a percentage of the target set, for example 10%. The default value is 100%, which means all targets run the association at the same time. If a new managed node starts and attempts to run an association while Systems Manager is running MaxConcurrency associations, the association is allowed to run. During the next association interval, the new managed node will process its association within the limit specified for MaxConcurrency."
        }
      },
      "ComplianceSeverity": {
@@ -4415,7 +4415,7 @@
      }
    },
    "traits": {
-      "smithy.api#documentation": "Describes the association of a Amazon Web Services Systems Manager document (SSM document) and an instance."
+      "smithy.api#documentation": "Describes the association of a Amazon Web Services Systems Manager document (SSM document) and a managed node."
    }
  },
  "com.amazonaws.ssm#CreateAssociationBatchResult": {

@@ -4441,7 +4441,7 @@
      "Name": {
        "target": "com.amazonaws.ssm#DocumentARN",
        "traits": {
-          "smithy.api#documentation": "The name of the SSM Command document or Automation runbook that contains the configuration information for the instance. You can specify Amazon Web Services-predefined documents, documents you created, or a document that is shared with you from another account. For Systems Manager documents (SSM documents) that are shared with you from other Amazon Web Services accounts, you must specify the complete SSM document ARN, in the following format: arn:partition:ssm:region:account-id:document/document-name For example: arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.",
+          "smithy.api#documentation": "The name of the SSM Command document or Automation runbook that contains the configuration information for the managed node. You can specify Amazon Web Services-predefined documents, documents you created, or a document that is shared with you from another account. For Systems Manager documents (SSM documents) that are shared with you from other Amazon Web Services accounts, you must specify the complete SSM document ARN, in the following format: arn:partition:ssm:region:account-id:document/document-name For example: arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need to specify the document name. For example, AWS-ApplyPatchBaseline or My-Document.",
          "smithy.api#required": {}
        }
      },
@@ -4454,7 +4454,7 @@
      "InstanceId": {
        "target": "com.amazonaws.ssm#InstanceId",
        "traits": {
-          "smithy.api#documentation": "The instance ID. InstanceId has been deprecated. To specify an instance ID for an association, use the Targets parameter. Requests that include the parameter InstanceID with Systems Manager documents (SSM documents) that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you can't use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter."
+          "smithy.api#documentation": "The managed node ID. InstanceId has been deprecated. To specify a managed node ID for an association, use the Targets parameter. Requests that include the parameter InstanceID with Systems Manager documents (SSM documents) that use schema version 2.0 or later will fail. In addition, if you use the parameter InstanceId, you can't use the parameters AssociationName, DocumentVersion, MaxErrors, MaxConcurrency, OutputLocation, or ScheduleExpression. To use these parameters, you must use the Targets parameter."
        }
      },
      "Parameters": {
@@ -4466,7 +4466,7 @@
      "Targets": {
        "target": "com.amazonaws.ssm#Targets",
        "traits": {
-          "smithy.api#documentation": "

                                                                      The targets for the association. You can target instances by using tags, Amazon Web Services resource\n groups, all instances in an Amazon Web Services account, or individual instance IDs. You can target all\n instances in an Amazon Web Services account by specifying the InstanceIds key with a value of\n *. For more information about choosing targets for an association, see Using targets and rate controls with State Manager associations in the\n Amazon Web Services Systems Manager User Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      The targets for the association. You can target managed nodes by using tags, Amazon Web Services resource\n groups, all managed nodes in an Amazon Web Services account, or individual managed node IDs. You can target all\n managed nodes in an Amazon Web Services account by specifying the InstanceIds key with a value of\n *. For more information about choosing targets for an association, see Using targets and rate controls with State Manager associations in the\n Amazon Web Services Systems Manager User Guide.

                                                                      " } }, "ScheduleExpression": { @@ -4496,13 +4496,13 @@ "MaxErrors": { "target": "com.amazonaws.ssm#MaxErrors", "traits": { - "smithy.api#documentation": "

                                                                      The number of errors that are allowed before the system stops sending requests to run the\n association on additional targets. You can specify either an absolute number of errors, for\n example 10, or a percentage of the target set, for example 10%. If you specify 3, for example,\n the system stops sending requests when the fourth error is received. If you specify 0, then the\n system stops sending requests after the first error is returned. If you run an association on 50\n instances and set MaxError to 10%, then the system stops sending the request when\n the sixth error is received.

                                                                      \n

                                                                      Executions that are already running an association when MaxErrors is reached\n are allowed to complete, but some of these executions may fail as well. If you need to ensure\n that there won't be more than max-errors failed executions, set MaxConcurrency to 1\n so that executions proceed one at a time.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of errors that are allowed before the system stops sending requests to run the\n association on additional targets. You can specify either an absolute number of errors, for\n example 10, or a percentage of the target set, for example 10%. If you specify 3, for example,\n the system stops sending requests when the fourth error is received. If you specify 0, then the\n system stops sending requests after the first error is returned. If you run an association on 50\n managed nodes and set MaxError to 10%, then the system stops sending the request\n when the sixth error is received.

                                                                      \n

                                                                      Executions that are already running an association when MaxErrors is reached\n are allowed to complete, but some of these executions may fail as well. If you need to ensure\n that there won't be more than max-errors failed executions, set MaxConcurrency to 1\n so that executions proceed one at a time.

                                                                      " } }, "MaxConcurrency": { "target": "com.amazonaws.ssm#MaxConcurrency", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of targets allowed to run the association at the same time. You can\n specify a number, for example 10, or a percentage of the target set, for example 10%. The default\n value is 100%, which means all targets run the association at the same time.

                                                                      \n

                                                                      If a new instance starts and attempts to run an association while Systems Manager is running\n MaxConcurrency associations, the association is allowed to run. During the next\n association interval, the new instance will process its association within the limit specified\n for MaxConcurrency.

                                                                      " + "smithy.api#documentation": "

                                                                      The maximum number of targets allowed to run the association at the same time. You can\n specify a number, for example 10, or a percentage of the target set, for example 10%. The default\n value is 100%, which means all targets run the association at the same time.

                                                                      \n

                                                                      If a new managed node starts and attempts to run an association while Systems Manager is running\n MaxConcurrency associations, the association is allowed to run. During the next\n association interval, the new managed node will process its association within the limit specified\n for MaxConcurrency.

                                                                      " } }, "ComplianceSeverity": { @@ -4577,7 +4577,7 @@ } ], "traits": { - "smithy.api#documentation": "

 } }, "ComplianceSeverity": {
@@ -4577,7 +4577,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Creates a Amazon Web Services Systems Manager (SSM document). An SSM document defines the actions that Systems Manager performs\n on your managed instances. For more information about SSM documents, including information about\n supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the\n Amazon Web Services Systems Manager User Guide."
+        "smithy.api#documentation": "Creates a Amazon Web Services Systems Manager (SSM document). An SSM document defines the actions that Systems Manager performs\n on your managed nodes. For more information about SSM documents, including information about\n supported schemas, features, and syntax, see Amazon Web Services Systems Manager Documents in the\n Amazon Web Services Systems Manager User Guide."
 } }, "com.amazonaws.ssm#CreateDocumentRequest": {
@@ -4746,7 +4746,7 @@ "AllowUnassociatedTargets": { "target": "com.amazonaws.ssm#MaintenanceWindowAllowUnassociatedTargets", "traits": {
-        "smithy.api#documentation": "Enables a maintenance window task to run on managed instances, even if you haven't\n registered those instances as targets. If enabled, then you must specify the unregistered\n instances (by instance ID) when you register a task with the maintenance window.\n If you don't enable this option, then you must specify previously-registered targets when\n you register a task with the maintenance window.",
+        "smithy.api#documentation": "Enables a maintenance window task to run on managed nodes, even if you haven't registered\n those nodes as targets. If enabled, then you must specify the unregistered managed nodes (by\n node ID) when you register a task with the maintenance window.\n If you don't enable this option, then you must specify previously-registered targets when\n you register a task with the maintenance window.",
         "smithy.api#required": {} } },

@@ -5042,7 +5042,7 @@ "target": "com.amazonaws.ssm#Boolean", "traits": { "smithy.api#box": {},
-        "smithy.api#documentation": "Indicates whether the list of approved patches includes non-security updates that should be\n applied to the instances. The default value is false. Applies to Linux instances\n only."
+        "smithy.api#documentation": "Indicates whether the list of approved patches includes non-security updates that should be\n applied to the managed nodes. The default value is false. Applies to Linux managed\n nodes only."
 } }, "RejectedPatches": {
@@ -5066,7 +5066,7 @@ "Sources": { "target": "com.amazonaws.ssm#PatchSourceList", "traits": {
-        "smithy.api#documentation": "Information about the patches to use to update the instances, including target operating\n systems and source repositories. Applies to Linux instances only."
+        "smithy.api#documentation": "Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only."

 } }, "ClientToken": {
@@ -5213,7 +5213,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Deletes an activation. You aren't required to delete an activation. If you delete an\n activation, you can no longer use it to register additional managed instances. Deleting an\n activation doesn't de-register managed instances. You must manually de-register managed\n instances."
+        "smithy.api#documentation": "Deletes an activation. You aren't required to delete an activation. If you delete an\n activation, you can no longer use it to register additional managed nodes. Deleting an activation\n doesn't de-register managed nodes. You must manually de-register managed nodes."
 } }, "com.amazonaws.ssm#DeleteActivationRequest": {
@@ -5258,7 +5258,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Disassociates the specified Amazon Web Services Systems Manager document (SSM document) from the specified instance.\n If you created the association by using the Targets parameter, then you must delete\n the association by using the association ID.\n When you disassociate a document from an instance, it doesn't change the configuration of\n the instance. To change the configuration state of an instance after you disassociate a document,\n you must create a new document with the desired configuration and associate it with the\n instance."
+        "smithy.api#documentation": "Disassociates the specified Amazon Web Services Systems Manager document (SSM document) from the specified managed node.\n If you created the association by using the Targets parameter, then you must delete\n the association by using the association ID.\n When you disassociate a document from a managed node, it doesn't change the configuration of\n the node. To change the configuration state of a managed node after you disassociate a document,\n you must create a new document with the desired configuration and associate it with the\n node."
 } }, "com.amazonaws.ssm#DeleteAssociationRequest": {
@@ -5273,7 +5273,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-        "smithy.api#documentation": "The instance ID.\n InstanceId has been deprecated. To specify an instance ID for an association,\n use the Targets parameter. Requests that include the\n parameter InstanceID with Systems Manager documents (SSM documents) that use schema version\n 2.0 or later will fail. In addition, if you use the parameter\n InstanceId, you can't use the parameters AssociationName,\n DocumentVersion, MaxErrors, MaxConcurrency,\n OutputLocation, or ScheduleExpression. To use these parameters, you\n must use the Targets parameter."
+        "smithy.api#documentation": "The managed node ID.\n InstanceId has been deprecated. To specify a managed node ID for an association,\n use the Targets parameter. Requests that include the\n parameter InstanceID with Systems Manager documents (SSM documents) that use schema version\n 2.0 or later will fail. In addition, if you use the parameter\n InstanceId, you can't use the parameters AssociationName,\n DocumentVersion, MaxErrors, MaxConcurrency,\n OutputLocation, or ScheduleExpression. To use these parameters, you\n must use the Targets parameter."

 } }, "AssociationId": {
@@ -5311,7 +5311,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Deletes the Amazon Web Services Systems Manager document (SSM document) and all instance associations to the\n document.\n Before you delete the document, we recommend that you use DeleteAssociation to disassociate all instances that are associated with the document."
+        "smithy.api#documentation": "Deletes the Amazon Web Services Systems Manager document (SSM document) and all managed node associations to the\n document.\n Before you delete the document, we recommend that you use DeleteAssociation to disassociate all managed nodes that are associated with the document."
 } }, "com.amazonaws.ssm#DeleteDocumentRequest": {
@@ -5655,7 +5655,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Deletes a resource data sync configuration. After the configuration is deleted, changes to\n data on managed instances are no longer synced to or from the target. Deleting a sync\n configuration doesn't delete data."
+        "smithy.api#documentation": "Deletes a resource data sync configuration. After the configuration is deleted, changes to\n data on managed nodes are no longer synced to or from the target. Deleting a sync configuration\n doesn't delete data."
 } }, "com.amazonaws.ssm#DeleteResourceDataSyncRequest": {
@@ -5700,7 +5700,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Removes the server or virtual machine from the list of registered servers. You can\n reregister the instance again at any time. If you don't plan to use Run Command on the server, we\n suggest uninstalling SSM Agent first."
+        "smithy.api#documentation": "Removes the server or virtual machine from the list of registered servers. You can\n reregister the node again at any time. If you don't plan to use Run Command on the server, we\n suggest uninstalling SSM Agent first."
 } }, "com.amazonaws.ssm#DeregisterManagedInstanceRequest": {
@@ -5709,7 +5709,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#ManagedInstanceId", "traits": {
-        "smithy.api#documentation": "The ID assigned to the managed instance when you registered it using the activation process.\n ",
+        "smithy.api#documentation": "The ID assigned to the managed node when you registered it using the activation process.\n ",
         "smithy.api#required": {} } }

@@ -5917,7 +5917,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Describes details about the activation, such as the date and time the activation was\n created, its expiration date, the Identity and Access Management (IAM) role assigned to\n the instances in the activation, and the number of instances registered by using this\n activation.",
+        "smithy.api#documentation": "Describes details about the activation, such as the date and time the activation was\n created, its expiration date, the Identity and Access Management (IAM) role assigned to\n the managed nodes in the activation, and the number of nodes registered by using this\n activation.",
         "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken",
@@ -6038,7 +6038,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Describes the association for the specified target or instance. If you created the\n association by using the Targets parameter, then you must retrieve the association\n by using the association ID."
+        "smithy.api#documentation": "Describes the association for the specified target or managed node. If you created the\n association by using the Targets parameter, then you must retrieve the association\n by using the association ID."
 } }, "com.amazonaws.ssm#DescribeAssociationExecutionTargets": {
@@ -6217,7 +6217,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-        "smithy.api#documentation": "The instance ID."
+        "smithy.api#documentation": "The managed node ID."
 } }, "AssociationId": {
@@ -6229,7 +6229,7 @@ "AssociationVersion": { "target": "com.amazonaws.ssm#AssociationVersion", "traits": {
-        "smithy.api#documentation": "Specify the association version to retrieve. To view the latest version, either specify\n $LATEST for this parameter, or omit this parameter. To view a list of all\n associations for an instance, use ListAssociations. To get a list of versions\n for a specific association, use ListAssociationVersions."
+        "smithy.api#documentation": "Specify the association version to retrieve. To view the latest version, either specify\n $LATEST for this parameter, or omit this parameter. To view a list of all\n associations for a managed node, use ListAssociations. To get a list of versions\n for a specific association, use ListAssociationVersions."
 } } }

@@ -6437,7 +6437,7 @@ "Filters": { "target": "com.amazonaws.ssm#PatchOrchestratorFilterList", "traits": {
-        "smithy.api#documentation": "Each element in the array is a structure containing a key-value pair.\n Windows Server\n Supported keys for Windows Server instance patches include the following: PATCH_SET (Sample values: OS | APPLICATION); PRODUCT (Sample values: WindowsServer2012 | Office 2010 | MicrosoftDefenderAntivirus); PRODUCT_FAMILY (Sample values: Windows | Office); MSRC_SEVERITY (Sample values: ServicePacks | Important | Moderate); CLASSIFICATION (Sample values: ServicePacks | SecurityUpdates | DefinitionUpdates); PATCH_ID (Sample values: KB123456 | KB4516046).\n Linux\n When specifying filters for Linux patches, you must specify a key-pair for\n PRODUCT. For example, using the Command Line Interface (CLI), the\n following command fails:\n aws ssm describe-available-patches --filters Key=CVE_ID,Values=CVE-2018-3615\n However, the following command succeeds:\n aws ssm describe-available-patches --filters Key=PRODUCT,Values=AmazonLinux2018.03 Key=CVE_ID,Values=CVE-2018-3615\n Supported keys for Linux instance patches include the following: PRODUCT (Sample values: AmazonLinux2018.03 | AmazonLinux2.0); NAME (Sample values: kernel-headers | samba-python | php); SEVERITY (Sample values: Critical | Important | Medium | Low); EPOCH (Sample values: 0 | 1); VERSION (Sample values: 78.6.1 | 4.10.16); RELEASE (Sample values: 9.56.amzn1 | 1.amzn2); ARCH (Sample values: i686 | x86_64); REPOSITORY (Sample values: Core | Updates); ADVISORY_ID (Sample values: ALAS-2018-1058 | ALAS2-2021-1594); CVE_ID (Sample values: CVE-2018-3615 | CVE-2020-1472); BUGZILLA_ID (Sample values: 1463241)."
+        "smithy.api#documentation": "Each element in the array is a structure containing a key-value pair.\n Windows Server\n Supported keys for Windows Server managed node patches include the following: PATCH_SET (Sample values: OS | APPLICATION); PRODUCT (Sample values: WindowsServer2012 | Office 2010 | MicrosoftDefenderAntivirus); PRODUCT_FAMILY (Sample values: Windows | Office); MSRC_SEVERITY (Sample values: ServicePacks | Important | Moderate); CLASSIFICATION (Sample values: ServicePacks | SecurityUpdates | DefinitionUpdates); PATCH_ID (Sample values: KB123456 | KB4516046).\n Linux\n When specifying filters for Linux patches, you must specify a key-pair for\n PRODUCT. For example, using the Command Line Interface (CLI), the\n following command fails:\n aws ssm describe-available-patches --filters Key=CVE_ID,Values=CVE-2018-3615\n However, the following command succeeds:\n aws ssm describe-available-patches --filters Key=PRODUCT,Values=AmazonLinux2018.03 Key=CVE_ID,Values=CVE-2018-3615\n Supported keys for Linux managed node patches include the following: PRODUCT (Sample values: AmazonLinux2018.03 | AmazonLinux2.0); NAME (Sample values: kernel-headers | samba-python | php); SEVERITY (Sample values: Critical | Important | Medium | Low); EPOCH (Sample values: 0 | 1); VERSION (Sample values: 78.6.1 | 4.10.16); RELEASE (Sample values: 9.56.amzn1 | 1.amzn2); ARCH (Sample values: i686 | x86_64); REPOSITORY (Sample values: Core | Updates); ADVISORY_ID (Sample values: ALAS-2018-1058 | ALAS2-2021-1594); CVE_ID (Sample values: CVE-2018-3615 | CVE-2020-1472); BUGZILLA_ID (Sample values: 1463241)."

 } }, "MaxResults": {
@@ -6634,7 +6634,7 @@ } ], "traits": {
-        "smithy.api#documentation": "All associations for the instance(s).",
+        "smithy.api#documentation": "All associations for the managed node(s).",
         "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken",
@@ -6649,7 +6649,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-        "smithy.api#documentation": "The instance ID for which you want to view all associations.",
+        "smithy.api#documentation": "The managed node ID for which you want to view all associations.",
         "smithy.api#required": {} } },
@@ -6674,7 +6674,7 @@ "Associations": { "target": "com.amazonaws.ssm#InstanceAssociationList", "traits": {
-        "smithy.api#documentation": "The associations for the requested instance."
+        "smithy.api#documentation": "The associations for the requested managed node."
 } }, "NextToken": {
@@ -6779,7 +6779,7 @@ } ], "traits": {
-        "smithy.api#documentation": "The status of the associations for the instance(s).",
+        "smithy.api#documentation": "The status of the associations for the managed node(s).",
         "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken",
@@ -6794,7 +6794,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-        "smithy.api#documentation": "The instance IDs for which you want association status information.",
+        "smithy.api#documentation": "The managed node IDs for which you want association status information.",
         "smithy.api#required": {} } },
@@ -6856,7 +6856,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Describes one or more of your instances, including information about the operating system\n platform, the version of SSM Agent installed on the instance, instance status, and so on.\n If you specify one or more instance IDs, it returns information for those instances. If you\n don't specify instance IDs, it returns information for all your instances. If you specify an\n instance ID that isn't valid or an instance that you don't own, you receive an error.\n The IamRole field for this API operation is the Identity and Access Management\n (IAM) role assigned to on-premises instances. This call doesn't return the\n IAM role for EC2 instances.",
+        "smithy.api#documentation": "Describes one or more of your managed nodes, including information about the operating\n system platform, the version of SSM Agent installed on the managed node, node status, and so\n on.\n If you specify one or more managed node IDs, it returns information for those managed nodes. If\n you don't specify node IDs, it returns information for all your managed nodes. If you specify\n a node ID that isn't valid or a node that you don't own, you receive an error.\n The IamRole field for this API operation is the Identity and Access Management\n (IAM) role assigned to on-premises managed nodes. This call doesn't return the\n IAM role for EC2 instances.",
         "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken",
@@ -6871,13 +6871,13 @@ "InstanceInformationFilterList": { "target": "com.amazonaws.ssm#InstanceInformationFilterList", "traits": {
-        "smithy.api#documentation": "This is a legacy method. We recommend that you don't use this method. Instead, use the\n Filters data type. Filters enables you to return instance information\n by filtering based on tags applied to managed instances.\n Attempting to use InstanceInformationFilterList and Filters leads\n to an exception error."
+        "smithy.api#documentation": "This is a legacy method. We recommend that you don't use this method. Instead, use the\n Filters data type. Filters enables you to return node information\n by filtering based on tags applied to managed nodes.\n Attempting to use InstanceInformationFilterList and Filters leads\n to an exception error."
 } }, "Filters": { "target": "com.amazonaws.ssm#InstanceInformationStringFilterList", "traits": {
-        "smithy.api#documentation": "One or more filters. Use a filter to return a more specific list of instances. You can\n filter based on tags applied to EC2 instances. Use this Filters data type instead of\n InstanceInformationFilterList, which is deprecated."
+        "smithy.api#documentation": "One or more filters. Use a filter to return a more specific list of managed nodes. You can\n filter based on tags applied to EC2 instances. Use this Filters data type instead of\n InstanceInformationFilterList, which is deprecated."
 } }, "MaxResults": {
@@ -6901,7 +6901,7 @@ "InstanceInformationList": { "target": "com.amazonaws.ssm#InstanceInformationList", "traits": {
-        "smithy.api#documentation": "The instance information list."
+        "smithy.api#documentation": "The managed node information list."

 } }, "NextToken": {
@@ -6929,7 +6929,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Retrieves the high-level patch state of one or more instances.",
+        "smithy.api#documentation": "Retrieves the high-level patch state of one or more managed nodes.",
         "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken",
@@ -6958,7 +6958,7 @@ } ], "traits": {
-        "smithy.api#documentation": "Retrieves the high-level patch state for the instances in the specified patch group.",
+        "smithy.api#documentation": "Retrieves the high-level patch state for the managed nodes in the specified patch group.",
         "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken",
@@ -7004,7 +7004,7 @@ "InstancePatchStates": { "target": "com.amazonaws.ssm#InstancePatchStatesList", "traits": {
-        "smithy.api#documentation": "The high-level patch state for the requested instances."
+        "smithy.api#documentation": "The high-level patch state for the requested managed nodes."
 } }, "NextToken": {
@@ -7021,7 +7021,7 @@ "InstanceIds": { "target": "com.amazonaws.ssm#InstanceIdList", "traits": {
-        "smithy.api#documentation": "The ID of the instance for which patch state information should be retrieved.",
+        "smithy.api#documentation": "The ID of the managed node for which patch state information should be retrieved.",
         "smithy.api#required": {} } },
@@ -7035,7 +7035,7 @@ "target": "com.amazonaws.ssm#PatchComplianceMaxResults", "traits": { "smithy.api#box": {},
-        "smithy.api#documentation": "The maximum number of instances to return (per page)."
+        "smithy.api#documentation": "The maximum number of managed nodes to return (per page)."
 } } }
@@ -7046,7 +7046,7 @@ "InstancePatchStates": { "target": "com.amazonaws.ssm#InstancePatchStateList", "traits": {
-        "smithy.api#documentation": "The high-level patch state for the requested instances."
+        "smithy.api#documentation": "The high-level patch state for the requested managed nodes."

                                                                      Retrieves information about the patches on the specified instance and their state relative\n to the patch baseline being used for the instance.

                                                                      ", + "smithy.api#documentation": "

                                                                      Retrieves information about the patches on the specified managed node and their state relative\n to the patch baseline being used for the node.

                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -7095,7 +7095,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the instance whose patch state information should be retrieved.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID of the managed node whose patch state information should be retrieved.

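The patch-state operations touched above (DescribeInstancePatchStates and DescribeInstancePatchStatesForPatchGroup) are plain read APIs. A minimal, illustrative call with the v3 client follows; it is not part of the generated patch and the node ID is a placeholder.

```ts
import { SSMClient, DescribeInstancePatchStatesCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  const { InstancePatchStates } = await client.send(
    new DescribeInstancePatchStatesCommand({ InstanceIds: ["i-0123456789abcdef0"] })
  );
  for (const state of InstancePatchStates ?? []) {
    // High-level patch state per managed node.
    console.log(state.InstanceId, state.MissingCount, state.FailedCount, state.Operation);
  }
}

main().catch(console.error);
```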
                                                                      ", "smithy.api#required": {} } }, @@ -7471,7 +7471,7 @@ "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID or key-value pair to retrieve information about.

                                                                      " + "smithy.api#documentation": "

                                                                      The managed node ID or key-value pair to retrieve information about.

                                                                      " } }, "ResourceType": { @@ -7703,7 +7703,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Retrieves information about the maintenance window targets or tasks that an instance is\n associated with.

                                                                      ", + "smithy.api#documentation": "

                                                                      Retrieves information about the maintenance window targets or tasks that a managed node is\n associated with.

                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -7718,7 +7718,7 @@ "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID or key-value pair to retrieve information about.

                                                                      ", + "smithy.api#documentation": "

                                                                      The managed node ID or key-value pair to retrieve information about.

                                                                      ", "smithy.api#required": {} } }, @@ -7750,7 +7750,7 @@ "WindowIdentities": { "target": "com.amazonaws.ssm#MaintenanceWindowsForTargetList", "traits": { - "smithy.api#documentation": "

                                                                      Information about the maintenance window targets and tasks an instance is associated\n with.

                                                                      " + "smithy.api#documentation": "

                                                                      Information about the maintenance window targets and tasks a managed node is associated\n with.

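DescribeMaintenanceWindowsForTarget, reworded above, answers "which maintenance windows would pick up this node?". A short illustrative sketch (not part of the generated patch; the node ID and region are placeholders):

```ts
import { SSMClient, DescribeMaintenanceWindowsForTargetCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  const { WindowIdentities } = await client.send(
    new DescribeMaintenanceWindowsForTargetCommand({
      Targets: [{ Key: "InstanceIds", Values: ["i-0123456789abcdef0"] }],
      ResourceType: "INSTANCE",
    })
  );
  // Maintenance windows whose targets or tasks reference the node.
  console.log(WindowIdentities?.map((w) => `${w.WindowId}: ${w.Name}`));
}

main().catch(console.error);
```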
                                                                      " } }, "NextToken": { @@ -8048,79 +8048,79 @@ "Instances": { "target": "com.amazonaws.ssm#Integer", "traits": { - "smithy.api#documentation": "

                                                                      The number of instances in the patch group.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes in the patch group.

                                                                      " } }, "InstancesWithInstalledPatches": { "target": "com.amazonaws.ssm#Integer", "traits": { - "smithy.api#documentation": "

                                                                      The number of instances with installed patches.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with installed patches.

                                                                      " } }, "InstancesWithInstalledOtherPatches": { "target": "com.amazonaws.ssm#Integer", "traits": { - "smithy.api#documentation": "

                                                                      The number of instances with patches installed that aren't defined in the patch\n baseline.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with patches installed that aren't defined in the patch\n baseline.

                                                                      " } }, "InstancesWithInstalledPendingRebootPatches": { "target": "com.amazonaws.ssm#InstancesCount", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances with patches installed by Patch Manager that haven't been rebooted\n after the patch installation. The status of these instances is NON_COMPLIANT.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with patches installed by Patch Manager that haven't been\n rebooted after the patch installation. The status of these managed nodes is\n NON_COMPLIANT.

                                                                      " } }, "InstancesWithInstalledRejectedPatches": { "target": "com.amazonaws.ssm#InstancesCount", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances with patches installed that are specified in a\n RejectedPatches list. Patches with a status of INSTALLED_REJECTED were\n typically installed before they were added to a RejectedPatches list.

                                                                      \n \n

                                                                      If ALLOW_AS_DEPENDENCY is the specified option for\n RejectedPatchesAction, the value of\n InstancesWithInstalledRejectedPatches will always be 0 (zero).

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with patches installed that are specified in a\n RejectedPatches list. Patches with a status of INSTALLED_REJECTED were\n typically installed before they were added to a RejectedPatches list.

                                                                      \n \n

                                                                      If ALLOW_AS_DEPENDENCY is the specified option for\n RejectedPatchesAction, the value of\n InstancesWithInstalledRejectedPatches will always be 0 (zero).

                                                                      \n
                                                                      " } }, "InstancesWithMissingPatches": { "target": "com.amazonaws.ssm#Integer", "traits": { - "smithy.api#documentation": "

                                                                      The number of instances with missing patches from the patch baseline.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with missing patches from the patch baseline.

                                                                      " } }, "InstancesWithFailedPatches": { "target": "com.amazonaws.ssm#Integer", "traits": { - "smithy.api#documentation": "

                                                                      The number of instances with patches from the patch baseline that failed to install.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with patches from the patch baseline that failed to\n install.

                                                                      " } }, "InstancesWithNotApplicablePatches": { "target": "com.amazonaws.ssm#Integer", "traits": { - "smithy.api#documentation": "

                                                                      The number of instances with patches that aren't applicable.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with patches that aren't applicable.

                                                                      " } }, "InstancesWithUnreportedNotApplicablePatches": { "target": "com.amazonaws.ssm#Integer", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances with NotApplicable patches beyond the supported limit,\n which aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with NotApplicable patches beyond the supported\n limit, which aren't reported by name to Inventory. Inventory is a capability of Amazon Web Services Systems Manager.

                                                                      " } }, "InstancesWithCriticalNonCompliantPatches": { "target": "com.amazonaws.ssm#InstancesCount", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances where patches that are specified as Critical for\n compliance reporting in the patch baseline aren't installed. These patches might be missing, have\n failed installation, were rejected, or were installed but awaiting a required instance reboot.\n The status of these instances is NON_COMPLIANT.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes where patches that are specified as Critical for\n compliance reporting in the patch baseline aren't installed. These patches might be missing, have\n failed installation, were rejected, or were installed but awaiting a required managed node reboot.\n The status of these managed nodes is NON_COMPLIANT.

                                                                      " } }, "InstancesWithSecurityNonCompliantPatches": { "target": "com.amazonaws.ssm#InstancesCount", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances where patches that are specified as Security in a patch\n advisory aren't installed. These patches might be missing, have failed installation, were\n rejected, or were installed but awaiting a required instance reboot. The status of these\n instances is NON_COMPLIANT.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes where patches that are specified as Security in a\n patch advisory aren't installed. These patches might be missing, have failed installation, were\n rejected, or were installed but awaiting a required managed node reboot. The status of these managed\n nodes is NON_COMPLIANT.

                                                                      " } }, "InstancesWithOtherNonCompliantPatches": { "target": "com.amazonaws.ssm#InstancesCount", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances with patches installed that are specified as other than\n Critical or Security but aren't compliant with the patch baseline. The\n status of these instances is NON_COMPLIANT.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with patches installed that are specified as other than\n Critical or Security but aren't compliant with the patch baseline. The\n status of these managed nodes is NON_COMPLIANT.

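All of the counters above belong to the DescribePatchGroupState result. An illustrative read of those fields with the v3 client (not part of the generated patch; the patch group name and region are placeholders):

```ts
import { SSMClient, DescribePatchGroupStateCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  const state = await client.send(
    new DescribePatchGroupStateCommand({ PatchGroup: "web-servers" })
  );
  // Per-group rollup of managed node patch compliance.
  console.log({
    total: state.Instances,
    missing: state.InstancesWithMissingPatches,
    failed: state.InstancesWithFailedPatches,
    criticalNonCompliant: state.InstancesWithCriticalNonCompliantPatches,
  });
}

main().catch(console.error);
```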
                                                                      " } } } @@ -9505,7 +9505,7 @@ "code": "DuplicateInstanceId", "httpResponseCode": 404 }, - "smithy.api#documentation": "

                                                                      You can't specify an instance ID in more than one association.

                                                                      ", + "smithy.api#documentation": "

                                                                      You can't specify a managed node ID in more than one association.

                                                                      ", "smithy.api#error": "client" } }, @@ -9807,7 +9807,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Returns detailed information about command execution for an invocation or plugin.

                                                                      \n

                                                                      \n GetCommandInvocation only gives the execution status of a plugin in a document.\n To get the command execution status on a specific instance, use ListCommandInvocations. To get the command execution status across instances, use\n ListCommands.

                                                                      ", + "smithy.api#documentation": "

                                                                      Returns detailed information about command execution for an invocation or plugin.

                                                                      \n

                                                                      \n GetCommandInvocation only gives the execution status of a plugin in a document.\n To get the command execution status on a specific managed node, use ListCommandInvocations. To get the command execution status across managed nodes,\n use ListCommands.

                                                                      ", "smithy.waiters#waitable": { "CommandExecuted": { "acceptors": [ @@ -9916,7 +9916,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      (Required) The ID of the managed instance targeted by the command. A managed instance can be\n an Amazon Elastic Compute Cloud (Amazon EC2) instance or an instance in your hybrid environment that is configured for\n Amazon Web Services Systems Manager.

                                                                      ", + "smithy.api#documentation": "

                                                                      (Required) The ID of the managed node targeted by the command. A managed node can be an\n Amazon Elastic Compute Cloud (Amazon EC2) instance, edge device, and on-premises server or VM in your hybrid environment that is configured for Amazon Web Services Systems Manager.

                                                                      ", "smithy.api#required": {} } }, @@ -9940,7 +9940,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the managed instance targeted by the command. A managed instance can be an EC2\n instance or an instance in your hybrid environment that is configured for Systems Manager.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the managed node targeted by the command. A managed node can be an\n Amazon Elastic Compute Cloud (Amazon EC2) instance, edge device, or on-premises server or VM in your hybrid environment that is configured for Amazon Web Services Systems Manager.

                                                                      " } }, "Comment": { @@ -9970,7 +9970,7 @@ "ResponseCode": { "target": "com.amazonaws.ssm#ResponseCode", "traits": { - "smithy.api#documentation": "

                                                                      The error level response code for the plugin script. If the response code is\n -1, then the command hasn't started running on the instance, or it wasn't received\n by the instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The error level response code for the plugin script. If the response code is\n -1, then the command hasn't started running on the managed node, or it wasn't received\n by the node.

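The GetCommandInvocation documentation above spells out its scope: plugin-level status on a single managed node (ListCommandInvocations/ListCommands cover the wider views). A minimal illustrative call, not part of the generated patch; the command and node IDs are placeholders:

```ts
import { SSMClient, GetCommandInvocationCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  const invocation = await client.send(
    new GetCommandInvocationCommand({
      CommandId: "11111111-2222-3333-4444-555555555555",
      InstanceId: "i-0123456789abcdef0",
    })
  );
  // Status/StatusDetails describe this one invocation; ResponseCode -1 means
  // the command never started running on (or never reached) the node.
  console.log(invocation.Status, invocation.ResponseCode, invocation.StandardOutputContent);
}

main().catch(console.error);
```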
                                                                      " } }, "ExecutionStartDateTime": { @@ -10000,7 +10000,7 @@ "StatusDetails": { "target": "com.amazonaws.ssm#StatusDetails", "traits": { - "smithy.api#documentation": "

                                                                      A detailed status of the command execution for an invocation. StatusDetails\n includes more information than Status because it includes states resulting from\n error and concurrency control parameters. StatusDetails can show different results\n than Status. For more information about these statuses, see Understanding\n command statuses in the Amazon Web Services Systems Manager User Guide.\n StatusDetails can be one of the following values:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Pending: The command hasn't been sent to the instance.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In Progress: The command has been sent to the instance but hasn't reached a terminal\n state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Delayed: The system attempted to send the command to the target, but the target wasn't\n available. The instance might not be available because of network issues, because the instance\n was stopped, or for similar reasons. The system will try to send the command again.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Success: The command or plugin ran successfully. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Delivery Timed Out: The command wasn't delivered to the instance before the delivery\n timeout expired. Delivery timeouts don't count against the parent command's\n MaxErrors limit, but they do contribute to whether the parent command status is\n Success or Incomplete. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Execution Timed Out: The command started to run on the instance, but the execution wasn't\n complete before the timeout expired. Execution timeouts count against the\n MaxErrors limit of the parent command. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Failed: The command wasn't run successfully on the instance. For a plugin, this indicates\n that the result code wasn't zero. For a command invocation, this indicates that the result code\n for one or more plugins wasn't zero. Invocation failures count against the\n MaxErrors limit of the parent command. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Canceled: The command was terminated before it was completed. This is a terminal\n state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Undeliverable: The command can't be delivered to the instance. The instance might not\n exist or might not be responding. Undeliverable invocations don't count against the parent\n command's MaxErrors limit and don't contribute to whether the parent command\n status is Success or Incomplete. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Terminated: The parent command exceeded its MaxErrors limit and subsequent\n command invocations were canceled by the system. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      A detailed status of the command execution for an invocation. StatusDetails\n includes more information than Status because it includes states resulting from\n error and concurrency control parameters. StatusDetails can show different results\n than Status. For more information about these statuses, see Understanding\n command statuses in the Amazon Web Services Systems Manager User Guide.\n StatusDetails can be one of the following values:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Pending: The command hasn't been sent to the managed node.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        In Progress: The command has been sent to the managed node but hasn't reached a terminal\n state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Delayed: The system attempted to send the command to the target, but the target wasn't\n available. The managed node might not be available because of network issues, because the node\n was stopped, or for similar reasons. The system will try to send the command again.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Success: The command or plugin ran successfully. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Delivery Timed Out: The command wasn't delivered to the managed node before the delivery\n timeout expired. Delivery timeouts don't count against the parent command's\n MaxErrors limit, but they do contribute to whether the parent command status is\n Success or Incomplete. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Execution Timed Out: The command started to run on the managed node, but the execution wasn't\n complete before the timeout expired. Execution timeouts count against the\n MaxErrors limit of the parent command. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Failed: The command wasn't run successfully on the managed node. For a plugin, this indicates\n that the result code wasn't zero. For a command invocation, this indicates that the result code\n for one or more plugins wasn't zero. Invocation failures count against the\n MaxErrors limit of the parent command. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Canceled: The command was terminated before it was completed. This is a terminal\n state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Undeliverable: The command can't be delivered to the managed node. The node might not\n exist or might not be responding. Undeliverable invocations don't count against the parent\n command's MaxErrors limit and don't contribute to whether the parent command\n status is Success or Incomplete. This is a terminal state.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Terminated: The parent command exceeded its MaxErrors limit and subsequent\n command invocations were canceled by the system. This is a terminal state.

                                                                        \n
                                                                      • \n
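The list above distinguishes terminal from non-terminal StatusDetails values, and this same hunk shows the CommandExecuted waitable on the operation. Rather than polling by hand, the generated client exposes a waiter; the sketch below assumes the standard v3 waiter name (waitUntilCommandExecuted) and uses placeholder IDs, and it is not part of the generated patch.

```ts
import { SSMClient, waitUntilCommandExecuted } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  // Polls GetCommandInvocation until the invocation reaches a terminal state
  // (Success, Failed, Cancelled, ...). maxWaitTime is in seconds.
  const result = await waitUntilCommandExecuted(
    { client, maxWaitTime: 300 },
    { CommandId: "11111111-2222-3333-4444-555555555555", InstanceId: "i-0123456789abcdef0" }
  );
  console.log(result.state); // e.g. "SUCCESS"
}

main().catch(console.error);
```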
                                                                      " } }, "StandardOutputContent": { @@ -10049,7 +10049,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Retrieves the Session Manager connection status for an instance to determine whether it is running and\n ready to receive Session Manager connections.

                                                                      " + "smithy.api#documentation": "

                                                                      Retrieves the Session Manager connection status for a managed node to determine whether it is running and\n ready to receive Session Manager connections.

                                                                      " } }, "com.amazonaws.ssm#GetConnectionStatusRequest": { @@ -10058,7 +10058,7 @@ "Target": { "target": "com.amazonaws.ssm#SessionTarget", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID.

                                                                      ", + "smithy.api#documentation": "

                                                                      The managed node ID.

                                                                      ", "smithy.api#required": {} } } @@ -10070,13 +10070,13 @@ "Target": { "target": "com.amazonaws.ssm#SessionTarget", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the instance to check connection status.

                                                                      " + "smithy.api#documentation": "

                                                                      The ID of the managed node to check connection status.

                                                                      " } }, "Status": { "target": "com.amazonaws.ssm#ConnectionStatus", "traits": { - "smithy.api#documentation": "

                                                                      The status of the connection to the instance. For example, 'Connected' or 'Not\n Connected'.

                                                                      " + "smithy.api#documentation": "

                                                                      The status of the connection to the managed node. For example, 'Connected' or 'Not\n Connected'.

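GetConnectionStatus, reworded above, is a single-field request/response pair. An illustrative call (not part of the generated patch; the node ID and region are placeholders):

```ts
import { SSMClient, GetConnectionStatusCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  // Status reports whether Session Manager can currently connect to the node.
  const { Target, Status } = await client.send(
    new GetConnectionStatusCommand({ Target: "i-0123456789abcdef0" })
  );
  console.log(`${Target}: ${Status}`);
}

main().catch(console.error);
```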
                                                                      " } } } @@ -10146,7 +10146,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Retrieves the current snapshot for the patch baseline the instance uses. This API is\n primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document).

                                                                      \n \n

                                                                      If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid\n this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of\n Amazon Web Services Systems Manager, with an SSM document that enables you to target an instance with a script or command.\n For example, run the command using the AWS-RunShellScript document or the\n AWS-RunPowerShellScript document.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      Retrieves the current snapshot for the patch baseline the managed node uses. This API is\n primarily used by the AWS-RunPatchBaseline Systems Manager document (SSM document).

                                                                      \n \n

                                                                      If you run the command locally, such as with the Command Line Interface (CLI), the system attempts to use your local Amazon Web Services credentials and the operation fails. To avoid\n this, you can run the command in the Amazon Web Services Systems Manager console. Use Run Command, a capability of\n Amazon Web Services Systems Manager, with an SSM document that enables you to target a managed node with a script or command.\n For example, run the command using the AWS-RunShellScript document or the\n AWS-RunPowerShellScript document.

                                                                      \n
                                                                      " } }, "com.amazonaws.ssm#GetDeployablePatchSnapshotForInstanceRequest": { @@ -10155,7 +10155,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the instance for which the appropriate patch snapshot should be retrieved.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID of the managed node for which the appropriate patch snapshot should be retrieved.

                                                                      ", "smithy.api#required": {} } }, @@ -10180,7 +10180,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID.

                                                                      " + "smithy.api#documentation": "

                                                                      The managed node ID.

                                                                      " } }, "SnapshotId": { @@ -10198,7 +10198,7 @@ "Product": { "target": "com.amazonaws.ssm#Product", "traits": { - "smithy.api#documentation": "

                                                                      Returns the specific operating system (for example Windows Server 2012 or Amazon Linux\n 2015.09) on the instance for the specified patch snapshot.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns the specific operating system (for example Windows Server 2012 or Amazon Linux\n 2015.09) on the managed node for the specified patch snapshot.

                                                                      " } } } @@ -10371,7 +10371,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Query inventory information. This includes instance status, such as Stopped or\n Terminated.

                                                                      ", + "smithy.api#documentation": "

                                                                      Query inventory information. This includes managed node status, such as Stopped or\n Terminated.

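The GetDeployablePatchSnapshotForInstance note above says the supported path is to drive patching through Run Command rather than calling the API with local credentials. A hedged sketch of that workflow (not part of the generated patch; the node ID, region, and chosen Operation are placeholders):

```ts
import { SSMClient, SendCommandCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  // Run the AWS-RunPatchBaseline SSM document against a managed node via Run Command.
  const { Command } = await client.send(
    new SendCommandCommand({
      DocumentName: "AWS-RunPatchBaseline",
      InstanceIds: ["i-0123456789abcdef0"],
      Parameters: { Operation: ["Scan"] }, // or "Install"
    })
  );
  console.log(Command?.CommandId);
}

main().catch(console.error);
```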
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -10392,7 +10392,7 @@ "Aggregators": { "target": "com.amazonaws.ssm#InventoryAggregatorList", "traits": { - "smithy.api#documentation": "

                                                                      Returns counts of inventory types based on one or more expressions. For example, if you\n aggregate by using an expression that uses the AWS:InstanceInformation.PlatformType\n type, you can see a count of how many Windows and Linux instances exist in your inventoried\n fleet.

                                                                      " + "smithy.api#documentation": "

                                                                      Returns counts of inventory types based on one or more expressions. For example, if you\n aggregate by using an expression that uses the AWS:InstanceInformation.PlatformType\n type, you can see a count of how many Windows and Linux managed nodes exist in your inventoried\n fleet.

                                                                      " } }, "ResultAttributes": { @@ -10422,7 +10422,7 @@ "Entities": { "target": "com.amazonaws.ssm#InventoryResultEntityList", "traits": { - "smithy.api#documentation": "

                                                                      Collection of inventory entities such as a collection of instance inventory.

                                                                      " + "smithy.api#documentation": "

                                                                      Collection of inventory entities such as a collection of managed node inventory.

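The Aggregators documentation above describes exactly one concrete use: grouping the inventoried fleet by AWS:InstanceInformation.PlatformType. The same expression passed through the v3 client looks like this; the snippet is illustrative only and not part of the generated patch.

```ts
import { SSMClient, GetInventoryCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  // Counts of Windows vs. Linux managed nodes in the inventoried fleet.
  const { Entities } = await client.send(
    new GetInventoryCommand({
      Aggregators: [{ Expression: "AWS:InstanceInformation.PlatformType" }],
    })
  );
  console.log(JSON.stringify(Entities, null, 2));
}

main().catch(console.error);
```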
                                                                      " } }, "NextToken": { @@ -11767,7 +11767,7 @@ "target": "com.amazonaws.ssm#Boolean", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      Indicates whether the list of approved patches includes non-security updates that should be\n applied to the instances. The default value is false. Applies to Linux instances\n only.

                                                                      " + "smithy.api#documentation": "

                                                                      Indicates whether the list of approved patches includes non-security updates that should be\n applied to the managed nodes. The default value is false. Applies to Linux managed\n nodes only.

                                                                      " } }, "RejectedPatches": { @@ -11809,7 +11809,7 @@ "Sources": { "target": "com.amazonaws.ssm#PatchSourceList", "traits": { - "smithy.api#documentation": "

                                                                      Information about the patches to use to update the instances, including target operating\n systems and source repositories. Applies to Linux instances only.

                                                                      " + "smithy.api#documentation": "

                                                                      Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only.

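ApprovedPatchesEnableNonSecurity and Sources, reworded above, are patch-baseline configuration fields. The following is only a rough sketch of how they might be set when creating a baseline; it is not part of the generated patch, and the baseline name, product list, and repository configuration are made-up placeholders.

```ts
import { SSMClient, CreatePatchBaselineCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  const { BaselineId } = await client.send(
    new CreatePatchBaselineCommand({
      Name: "linux-custom-baseline",
      OperatingSystem: "AMAZON_LINUX_2",
      ApprovedPatchesEnableNonSecurity: true, // include non-security updates (Linux only)
      Sources: [
        {
          Name: "my-extra-repo",
          Products: ["AmazonLinux2"],
          Configuration:
            "[my-extra-repo]\nname=My extra repo\nbaseurl=https://example.com/repo\nenabled=1",
        },
      ],
    })
  );
  console.log(BaselineId);
}

main().catch(console.error);
```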
                                                                      " } } } @@ -11986,7 +11986,7 @@ "InstanceAssociationStatusAggregatedCount": { "target": "com.amazonaws.ssm#InstanceAssociationStatusAggregatedCount", "traits": { - "smithy.api#documentation": "

                                                                      The number of associations for the instance(s).

                                                                      " + "smithy.api#documentation": "

                                                                      The number of associations for the managed node(s).

                                                                      " } } }, @@ -12006,24 +12006,24 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID.

                                                                      " + "smithy.api#documentation": "

                                                                      The managed node ID.

                                                                      " } }, "Content": { "target": "com.amazonaws.ssm#DocumentContent", "traits": { - "smithy.api#documentation": "

                                                                      The content of the association document for the instance(s).

                                                                      " + "smithy.api#documentation": "

                                                                      The content of the association document for the managed node(s).

                                                                      " } }, "AssociationVersion": { "target": "com.amazonaws.ssm#AssociationVersion", "traits": { - "smithy.api#documentation": "

                                                                      Version information for the association on the instance.

                                                                      " + "smithy.api#documentation": "

                                                                      Version information for the association on the managed node.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      One or more association documents on the instance.

                                                                      " + "smithy.api#documentation": "

                                                                      One or more association documents on the managed node.

                                                                      " } }, "com.amazonaws.ssm#InstanceAssociationExecutionSummary": { @@ -12102,31 +12102,31 @@ "AssociationVersion": { "target": "com.amazonaws.ssm#AssociationVersion", "traits": { - "smithy.api#documentation": "

                                                                      The version of the association applied to the instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The version of the association applied to the managed node.

                                                                      " } }, "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID where the association was created.

                                                                      " + "smithy.api#documentation": "

                                                                      The managed node ID where the association was created.

                                                                      " } }, "ExecutionDate": { "target": "com.amazonaws.ssm#DateTime", "traits": { - "smithy.api#documentation": "

                                                                      The date the instance association ran.

                                                                      " + "smithy.api#documentation": "

                                                                      The date the association ran.

                                                                      " } }, "Status": { "target": "com.amazonaws.ssm#StatusName", "traits": { - "smithy.api#documentation": "

                                                                      Status information about the instance association.

                                                                      " + "smithy.api#documentation": "

                                                                      Status information about the association.

                                                                      " } }, "DetailedStatus": { "target": "com.amazonaws.ssm#StatusName", "traits": { - "smithy.api#documentation": "

                                                                      Detailed status information about the instance association.

                                                                      " + "smithy.api#documentation": "

                                                                      Detailed status information about the association.

                                                                      " } }, "ExecutionSummary": { @@ -12150,12 +12150,12 @@ "AssociationName": { "target": "com.amazonaws.ssm#AssociationName", "traits": { - "smithy.api#documentation": "

                                                                      The name of the association applied to the instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the association applied to the managed node.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      Status information about the instance association.

                                                                      " + "smithy.api#documentation": "

                                                                      Status information about the association.

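The fields above belong to InstanceAssociationStatusInfo, which is what DescribeInstanceAssociationsStatus returns per managed node. A short illustrative read (not part of the generated patch; the node ID and region are placeholders):

```ts
import { SSMClient, DescribeInstanceAssociationsStatusCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-1" });

async function main() {
  const { InstanceAssociationStatusInfos } = await client.send(
    new DescribeInstanceAssociationsStatusCommand({ InstanceId: "i-0123456789abcdef0" })
  );
  for (const info of InstanceAssociationStatusInfos ?? []) {
    // Mirrors the AssociationName/AssociationVersion/Status/DetailedStatus members above.
    console.log(info.AssociationName, info.AssociationVersion, info.Status, info.DetailedStatus);
  }
}

main().catch(console.error);
```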
                                                                      " } }, "com.amazonaws.ssm#InstanceAssociationStatusInfos": { @@ -12191,7 +12191,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID.

                                                                      " + "smithy.api#documentation": "

                                                                      The managed node ID.

                                                                      " } }, "PingStatus": { @@ -12209,14 +12209,14 @@ "AgentVersion": { "target": "com.amazonaws.ssm#Version", "traits": { - "smithy.api#documentation": "

                                                                      The version of SSM Agent running on your Linux instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The version of SSM Agent running on your Linux managed node.

                                                                      " } }, "IsLatestVersion": { "target": "com.amazonaws.ssm#Boolean", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      Indicates whether the latest version of SSM Agent is running on your Linux Managed Instance.\n This field doesn't indicate whether or not the latest version is installed on Windows managed\n instances, because some older versions of Windows Server use the EC2Config service to process\n Systems Manager requests.

                                                                      " + "smithy.api#documentation": "

                                                                      Indicates whether the latest version of SSM Agent is running on your Linux managed node. This\n field doesn't indicate whether or not the latest version is installed on Windows managed nodes,\n because some older versions of Windows Server use the EC2Config service to process Systems Manager\n requests.

                                                                      " } }, "PlatformType": { @@ -12228,13 +12228,13 @@ "PlatformName": { "target": "com.amazonaws.ssm#String", "traits": { - "smithy.api#documentation": "

                                                                      The name of the operating system platform running on your instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the operating system platform running on your managed node.

                                                                      " } }, "PlatformVersion": { "target": "com.amazonaws.ssm#String", "traits": { - "smithy.api#documentation": "

                                                                      The version of the OS platform running on your instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The version of the OS platform running on your managed node.

                                                                      " } }, "ActivationId": { @@ -12246,13 +12246,13 @@ "IamRole": { "target": "com.amazonaws.ssm#IamRole", "traits": { - "smithy.api#documentation": "

                                                                      The Identity and Access Management (IAM) role assigned to the on-premises Systems Manager\n managed instance. This call doesn't return the IAM role for Amazon Elastic Compute Cloud\n (Amazon EC2) instances. To retrieve the IAM role for an EC2 instance, use\n the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in\n the Amazon Web Services CLI Command Reference.

                                                                      " + "smithy.api#documentation": "

                                                                      The Identity and Access Management (IAM) role assigned to the on-premises Systems Manager\n managed node. This call doesn't return the IAM role for Amazon Elastic Compute Cloud\n (Amazon EC2) instances. To retrieve the IAM role for an EC2 instance, use\n the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in\n the Amazon Web Services CLI Command Reference.

                                                                      " } }, "RegistrationDate": { "target": "com.amazonaws.ssm#DateTime", "traits": { - "smithy.api#documentation": "

                                                                      The date the server or VM was registered with Amazon Web Services as a managed instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The date the server or VM was registered with Amazon Web Services as a managed node.

                                                                      " } }, "ResourceType": { @@ -12264,19 +12264,19 @@ "Name": { "target": "com.amazonaws.ssm#String", "traits": { - "smithy.api#documentation": "

                                                                      The name assigned to an on-premises server or virtual machine (VM) when it is activated as a\n Systems Manager managed instance. The name is specified as the DefaultInstanceName property\n using the CreateActivation command. It is applied to the managed instance by\n specifying the Activation Code and Activation ID when you install SSM Agent on the instance, as\n explained in Install SSM Agent for a\n hybrid environment (Linux) and Install SSM Agent for a\n hybrid environment (Windows). To retrieve the Name tag of an EC2 instance, use the Amazon EC2\n DescribeInstances operation. For information, see DescribeInstances in the\n Amazon EC2 API Reference or describe-instances in the\n Amazon Web Services CLI Command Reference.

                                                                      " + "smithy.api#documentation": "

                                                                      The name assigned to an on-premises server, edge device, or virtual machine (VM) when it is\n activated as a Systems Manager managed node. The name is specified as the DefaultInstanceName\n property using the CreateActivation command. It is applied to the managed node\n by specifying the Activation Code and Activation ID when you install SSM Agent on the node, as\n explained in Install SSM Agent for a\n hybrid environment (Linux) and Install SSM Agent for a\n hybrid environment (Windows). To retrieve the Name tag of an EC2 instance,\n use the Amazon EC2 DescribeInstances operation. For information, see DescribeInstances in the Amazon EC2 API Reference or describe-instances in\n the Amazon Web Services CLI Command Reference.

                                                                      " } }, "IPAddress": { "target": "com.amazonaws.ssm#IPAddress", "traits": { - "smithy.api#documentation": "

                                                                      The IP address of the managed instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The IP address of the managed node.

                                                                      " } }, "ComputerName": { "target": "com.amazonaws.ssm#ComputerName", "traits": { - "smithy.api#documentation": "

                                                                      The fully qualified host name of the managed instance.

                                                                      " + "smithy.api#documentation": "

                                                                      The fully qualified host name of the managed node.

                                                                      " } }, "AssociationStatus": { @@ -12302,10 +12302,22 @@ "traits": { "smithy.api#documentation": "

                                                                      Information about the association.

                                                                      " } + }, + "SourceId": { + "target": "com.amazonaws.ssm#SourceId", + "traits": { + "smithy.api#documentation": "

                                                                      The ID of the source resource. For IoT Greengrass devices, SourceId is\n the Thing name.

                                                                      " + } + }, + "SourceType": { + "target": "com.amazonaws.ssm#SourceType", + "traits": { + "smithy.api#documentation": "

                                                                      The type of the source resource. For IoT Greengrass devices, SourceType\n is AWS::IoT::Thing.

                                                                      " + } } }, "traits": { - "smithy.api#documentation": "

                                                                      Describes a filter for a specific list of instances.

                                                                      " + "smithy.api#documentation": "

                                                                      Describes a filter for a specific list of managed nodes.

                                                                      " } }, "com.amazonaws.ssm#InstanceInformationFilter": { @@ -12327,7 +12339,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      Describes a filter for a specific list of instances. You can filter instances information by\n using tags. You specify tags by using a key-value mapping.

                                                                      \n

                                                                      Use this operation instead of the DescribeInstanceInformationRequest$InstanceInformationFilterList method. The\n InstanceInformationFilterList method is a legacy method and doesn't support tags.\n

                                                                      " + "smithy.api#documentation": "

                                                                      Describes a filter for a specific list of managed nodes. You can filter node information by\n using tags. You specify tags by using a key-value mapping.

                                                                      \n

                                                                      Use this operation instead of the DescribeInstanceInformationRequest$InstanceInformationFilterList method. The\n InstanceInformationFilterList method is a legacy method and doesn't support tags.\n

                                                                      " } }, "com.amazonaws.ssm#InstanceInformationFilterKey": { @@ -12421,7 +12433,7 @@ "Key": { "target": "com.amazonaws.ssm#InstanceInformationStringFilterKey", "traits": { - "smithy.api#documentation": "

                                                                      The filter key name to describe your instances. For example:

                                                                      \n

                                                                      \"InstanceIds\"|\"AgentVersion\"|\"PingStatus\"|\"PlatformTypes\"|\"ActivationIds\"|\"IamRole\"|\"ResourceType\"|\"AssociationStatus\"|\"Tag\n Key\"

                                                                      \n \n

                                                                      \n Tag key isn't a valid filter. You must specify either tag-key or\n tag:keyname and a string. Here are some valid examples: tag-key, tag:123, tag:al!,\n tag:Windows. Here are some invalid examples: tag-keys, Tag Key, tag:,\n tagKey, abc:keyname.

                                                                      \n
                                                                      ", + "smithy.api#documentation": "

                                                                      The filter key name to describe your managed nodes. For example:

                                                                      \n

                                                                      \"InstanceIds\"|\"AgentVersion\"|\"PingStatus\"|\"PlatformTypes\"|\"ActivationIds\"|\"IamRole\"|\"ResourceType\"|\"AssociationStatus\"|\"Tag\n Key\"

                                                                      \n \n

                                                                      \n Tag key isn't a valid filter. You must specify either tag-key or\n tag:keyname and a string. Here are some valid examples: tag-key, tag:123, tag:al!,\n tag:Windows. Here are some invalid examples: tag-keys, Tag Key, tag:,\n tagKey, abc:keyname.

                                                                      \n
                                                                      ", "smithy.api#required": {} } }, @@ -12434,7 +12446,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The filters to describe or get information about your managed instances.

                                                                      " + "smithy.api#documentation": "

                                                                      The filters to describe or get information about your managed nodes.

                                                                      " } }, "com.amazonaws.ssm#InstanceInformationStringFilterKey": { @@ -12465,21 +12477,21 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

  "com.amazonaws.ssm#InstanceInformationStringFilterKey": {
@@ -12465,21 +12477,21 @@
      "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-        "smithy.api#documentation": "The ID of the managed instance the high-level patch compliance information was collected\n for.",
+        "smithy.api#documentation": "The ID of the managed node the high-level patch compliance information was collected\n for.",
        "smithy.api#required": {}
      } },
      "PatchGroup": { "target": "com.amazonaws.ssm#PatchGroup", "traits": {
-        "smithy.api#documentation": "The name of the patch group the managed instance belongs to.",
+        "smithy.api#documentation": "The name of the patch group the managed node belongs to.",
        "smithy.api#required": {}
      } },
      "BaselineId": { "target": "com.amazonaws.ssm#BaselineId", "traits": {
-        "smithy.api#documentation": "The ID of the patch baseline used to patch the instance.",
+        "smithy.api#documentation": "The ID of the patch baseline used to patch the managed node.",
        "smithy.api#required": {}
      } },
@@ -12504,33 +12516,33 @@
      "InstalledCount": { "target": "com.amazonaws.ssm#PatchInstalledCount", "traits": {
-        "smithy.api#documentation": "The number of patches from the patch baseline that are installed on the instance."
+        "smithy.api#documentation": "The number of patches from the patch baseline that are installed on the managed node."
      } },
      "InstalledOtherCount": { "target": "com.amazonaws.ssm#PatchInstalledOtherCount", "traits": {
-        "smithy.api#documentation": "The number of patches not specified in the patch baseline that are installed on the\n instance."
+        "smithy.api#documentation": "The number of patches not specified in the patch baseline that are installed on the\n managed node."
      } },
      "InstalledPendingRebootCount": { "target": "com.amazonaws.ssm#PatchInstalledPendingRebootCount", "traits": {
        "smithy.api#box": {},
-        "smithy.api#documentation": "The number of patches installed by Patch Manager since the last time the instance was\n rebooted."
+        "smithy.api#documentation": "The number of patches installed by Patch Manager since the last time the managed node was\n rebooted."
      } },
      "InstalledRejectedCount": { "target": "com.amazonaws.ssm#PatchInstalledRejectedCount", "traits": {
        "smithy.api#box": {},
-        "smithy.api#documentation": "The number of patches installed on an instance that are specified in a\n RejectedPatches list. Patches with a status of InstalledRejected were\n typically installed before they were added to a RejectedPatches list.\n \n If ALLOW_AS_DEPENDENCY is the specified option for\n RejectedPatchesAction, the value of InstalledRejectedCount will\n always be 0 (zero).\n"
+        "smithy.api#documentation": "The number of patches installed on a managed node that are specified in a\n RejectedPatches list. Patches with a status of InstalledRejected were\n typically installed before they were added to a RejectedPatches list.\n \n If ALLOW_AS_DEPENDENCY is the specified option for\n RejectedPatchesAction, the value of InstalledRejectedCount will\n always be 0 (zero).\n"
      } },
      "MissingCount": { "target": "com.amazonaws.ssm#PatchMissingCount", "traits": {
-        "smithy.api#documentation": "The number of patches from the patch baseline that are applicable for the instance but\n aren't currently installed."
+        "smithy.api#documentation": "The number of patches from the patch baseline that are applicable for the managed node but\n aren't currently installed."
      } },
      "FailedCount": {
@@ -12549,20 +12561,20 @@
      "NotApplicableCount": { "target": "com.amazonaws.ssm#PatchNotApplicableCount", "traits": {
-        "smithy.api#documentation": "The number of patches from the patch baseline that aren't applicable for the instance and\n therefore aren't installed on the instance. This number may be truncated if the list of patch\n names is very large. The number of patches beyond this limit are reported in\n UnreportedNotApplicableCount."
+        "smithy.api#documentation": "The number of patches from the patch baseline that aren't applicable for the managed node and\n therefore aren't installed on the node. This number may be truncated if the list of patch\n names is very large. The number of patches beyond this limit are reported in\n UnreportedNotApplicableCount."
                                                                      " } }, "OperationStartTime": { "target": "com.amazonaws.ssm#DateTime", "traits": { - "smithy.api#documentation": "

                                                                      The time the most recent patching operation was started on the instance.

                                                                      ", + "smithy.api#documentation": "

                                                                      The time the most recent patching operation was started on the managed node.

                                                                      ", "smithy.api#required": {} } }, "OperationEndTime": { "target": "com.amazonaws.ssm#DateTime", "traits": { - "smithy.api#documentation": "

                                                                      The time the most recent patching operation completed on the instance.

                                                                      ", + "smithy.api#documentation": "

                                                                      The time the most recent patching operation completed on the managed node.

                                                                      ", "smithy.api#required": {} } }, @@ -12576,39 +12588,39 @@ "LastNoRebootInstallOperationTime": { "target": "com.amazonaws.ssm#DateTime", "traits": { - "smithy.api#documentation": "

                                                                      The time of the last attempt to patch the instance with NoReboot specified as\n the reboot option.

                                                                      " + "smithy.api#documentation": "

                                                                      The time of the last attempt to patch the managed node with NoReboot specified as\n the reboot option.

                                                                      " } }, "RebootOption": { "target": "com.amazonaws.ssm#RebootOption", "traits": { - "smithy.api#documentation": "

                                                                      Indicates the reboot option specified in the patch baseline.

                                                                      \n \n

                                                                      Reboot options apply to Install operations only. Reboots aren't attempted for\n Patch Manager Scan operations.

                                                                      \n
                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n RebootIfNeeded: Patch Manager tries to reboot the instance if it installed\n any patches, or if any patches are detected with a status of\n InstalledPendingReboot.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n NoReboot: Patch Manager attempts to install missing packages without trying\n to reboot the system. Patches installed with this option are assigned a status of\n InstalledPendingReboot. These patches might not be in effect until a reboot is\n performed.

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      Indicates the reboot option specified in the patch baseline.

                                                                      \n \n

                                                                      Reboot options apply to Install operations only. Reboots aren't attempted for\n Patch Manager Scan operations.

                                                                      \n
                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n RebootIfNeeded: Patch Manager tries to reboot the managed node if it installed\n any patches, or if any patches are detected with a status of\n InstalledPendingReboot.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n NoReboot: Patch Manager attempts to install missing packages without trying\n to reboot the system. Patches installed with this option are assigned a status of\n InstalledPendingReboot. These patches might not be in effect until a reboot is\n performed.

                                                                        \n
                                                                      • \n
                                                                      " } }, "CriticalNonCompliantCount": { "target": "com.amazonaws.ssm#PatchCriticalNonCompliantCount", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances where patches that are specified as Critical for\n compliance reporting in the patch baseline aren't installed. These patches might be missing, have\n failed installation, were rejected, or were installed but awaiting a required instance reboot.\n The status of these instances is NON_COMPLIANT.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes where patches that are specified as Critical for\n compliance reporting in the patch baseline aren't installed. These patches might be missing, have\n failed installation, were rejected, or were installed but awaiting a required managed node reboot.\n The status of these managed nodes is NON_COMPLIANT.

                                                                      " } }, "SecurityNonCompliantCount": { "target": "com.amazonaws.ssm#PatchSecurityNonCompliantCount", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances where patches that are specified as Security in a patch\n advisory aren't installed. These patches might be missing, have failed installation, were\n rejected, or were installed but awaiting a required instance reboot. The status of these\n instances is NON_COMPLIANT.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes where patches that are specified as Security in a\n patch advisory aren't installed. These patches might be missing, have failed installation, were\n rejected, or were installed but awaiting a required managed node reboot. The status of these managed\n nodes is NON_COMPLIANT.

                                                                      " } }, "OtherNonCompliantCount": { "target": "com.amazonaws.ssm#PatchOtherNonCompliantCount", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      The number of instances with patches installed that are specified as other than\n Critical or Security but aren't compliant with the patch baseline. The\n status of these instances is NON_COMPLIANT.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes with patches installed that are specified as other than\n Critical or Security but aren't compliant with the patch baseline. The\n status of these managed nodes is NON_COMPLIANT.

                                                                      " } } }, "traits": { - "smithy.api#documentation": "

                                                                      Defines the high-level patch compliance state for a managed instance, providing information\n about the number of installed, missing, not applicable, and failed patches along with metadata\n about the operation when this information was gathered for the instance.

                                                                      " + "smithy.api#documentation": "

                                                                      Defines the high-level patch compliance state for a managed node, providing information\n about the number of installed, missing, not applicable, and failed patches along with metadata\n about the operation when this information was gathered for the managed node.

                                                                      " } }, "com.amazonaws.ssm#InstancePatchStateFilter": { @@ -12637,7 +12649,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      Defines a filter used in DescribeInstancePatchStatesForPatchGroup to scope\n down the information returned by the API.

                                                                      \n

                                                                      \n Example: To filter for all instances in a patch group\n having more than three patches with a FailedCount status, use the following for the\n filter:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Value for Key: FailedCount\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Value for Type: GreaterThan\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Value for Values: 3\n

                                                                        \n
                                                                      • \n
                                                                      " + "smithy.api#documentation": "

                                                                      Defines a filter used in DescribeInstancePatchStatesForPatchGroup to scope\n down the information returned by the API.

                                                                      \n

                                                                      \n Example: To filter for all managed nodes in a patch group\n having more than three patches with a FailedCount status, use the following for the\n filter:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        Value for Key: FailedCount\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Value for Type: GreaterThan\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        Value for Values: 3\n

                                                                        \n
                                                                      • \n
                                                                      " } }, "com.amazonaws.ssm#InstancePatchStateFilterKey": { @@ -13116,7 +13128,7 @@ "code": "InvalidInstanceId", "httpResponseCode": 404 }, - "smithy.api#documentation": "

  "com.amazonaws.ssm#InstancePatchStateFilterKey": {
@@ -13116,7 +13128,7 @@
        "code": "InvalidInstanceId",
        "httpResponseCode": 404
      },
-      "smithy.api#documentation": "The following problems can cause this exception:\n You don't have permission to access the instance.\n Amazon Web Services Systems Manager Agent(SSM Agent) isn't running. Verify that SSM Agent is\n running.\n SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM Agent.\n The instance isn't in valid state. Valid states are: Running,\n Pending, Stopped, and Stopping. Invalid states are:\n Shutting-down and Terminated.\n",
+      "smithy.api#documentation": "The following problems can cause this exception:\n You don't have permission to access the managed node.\n Amazon Web Services Systems Manager Agent(SSM Agent) isn't running. Verify that SSM Agent is\n running.\n SSM Agent isn't registered with the SSM endpoint. Try reinstalling SSM Agent.\n The managed node isn't in valid state. Valid states are: Running,\n Pending, Stopped, and Stopping. Invalid states are:\n Shutting-down and Terminated.\n",
      "smithy.api#error": "client"
    }
  },

@@ -13387,7 +13399,7 @@
        "code": "InvalidResourceType",
        "httpResponseCode": 400
      },
-      "smithy.api#documentation": "The resource type isn't valid. For example, if you are attempting to tag an instance, the\n instance must be a registered, managed instance.",
+      "smithy.api#documentation": "The resource type isn't valid. For example, if you are attempting to tag an EC2 instance, the\n instance must be a registered managed node.",
      "smithy.api#error": "client"
    }
  },
@@ -13703,7 +13715,7 @@
      "Values": { "target": "com.amazonaws.ssm#InventoryFilterValueList", "traits": {
-        "smithy.api#documentation": "Inventory filter values. Example: inventory filter where instance IDs are specified as\n values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g,\n i-1a2b3c4d5e6,Type=Equal.",
+        "smithy.api#documentation": "Inventory filter values. Example: inventory filter where managed node IDs are specified as\n values Key=AWS:InstanceInformation.InstanceId,Values= i-a12b3c4d5e6g,\n i-1a2b3c4d5e6,Type=Equal.",
        "smithy.api#required": {}
      } },

@@ -13850,7 +13862,7 @@
    } },
    "traits": {
-      "smithy.api#documentation": "Information collected from managed instances based on your inventory policy document"
+      "smithy.api#documentation": "Information collected from managed nodes based on your inventory policy document"
    }
  },
  "com.amazonaws.ssm#InventoryItemAttribute": {
@@ -14067,7 +14079,7 @@
      "Id": { "target": "com.amazonaws.ssm#InventoryResultEntityId", "traits": {
-        "smithy.api#documentation": "ID of the inventory result entity. For example, for managed instance inventory the result\n will be the managed instance ID. For EC2 instance inventory, the result will be the instance ID.\n"
+        "smithy.api#documentation": "ID of the inventory result entity. For example, for managed node inventory the result\n will be the managed node ID. For EC2 instance inventory, the result will be the instance ID.\n"
      } },
      "Data": {
@@ -14172,7 +14184,7 @@
        "code": "InvocationDoesNotExist",
        "httpResponseCode": 400
      },
-      "smithy.api#documentation": "The command ID and instance ID you specified didn't match any invocations. Verify the\n command ID and the instance ID and try again.",
+      "smithy.api#documentation": "The command ID and managed node ID you specified didn't match any invocations. Verify the\n command ID and the managed node ID and try again.",
      "smithy.api#error": "client"
    }
  },
@@ -14420,7 +14432,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You\n can limit the results to a specific State Manager association document or instance by specifying\n a filter. State Manager is a capability of Amazon Web Services Systems Manager.",
+      "smithy.api#documentation": "Returns all State Manager associations in the current Amazon Web Services account and Amazon Web Services Region. You\n can limit the results to a specific State Manager association document or managed node by specifying\n a filter. State Manager is a capability of Amazon Web Services Systems Manager.",
                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -14435,7 +14447,7 @@ "AssociationFilterList": { "target": "com.amazonaws.ssm#AssociationFilterList", "traits": { - "smithy.api#documentation": "

                                                                      One or more filters. Use a filter to return a more specific list of results.

                                                                      \n \n

                                                                      Filtering associations using the InstanceID attribute only returns legacy\n associations created using the InstanceID attribute. Associations targeting the\n instance that are part of the Target Attributes ResourceGroup or Tags\n aren't returned.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      One or more filters. Use a filter to return a more specific list of results.

                                                                      \n \n

                                                                      Filtering associations using the InstanceID attribute only returns legacy\n associations created using the InstanceID attribute. Associations targeting the\n managed node that are part of the Target Attributes ResourceGroup or Tags\n aren't returned.

                                                                      \n
                                                                      " } }, "MaxResults": { @@ -14496,7 +14508,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      An invocation is copy of a command sent to a specific instance. A command can apply to one\n or more instances. A command invocation applies to one instance. For example, if a user runs\n SendCommand against three instances, then a command invocation is created for each\n requested instance ID. ListCommandInvocations provide status about command\n execution.

                                                                      ", + "smithy.api#documentation": "

                                                                      An invocation is copy of a command sent to a specific managed node. A command can apply to one\n or more managed nodes. A command invocation applies to one managed node. For example, if a user runs\n SendCommand against three managed nodes, then a command invocation is created for\n each requested managed node ID. ListCommandInvocations provide status about command\n execution.

                                                                      ", "smithy.api#paginated": { "inputToken": "NextToken", "outputToken": "NextToken", @@ -14517,7 +14529,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      (Optional) The command execution details for a specific instance ID.

                                                                      " + "smithy.api#documentation": "

                                                                      (Optional) The command execution details for a specific managed node ID.

                                                                      " } }, "MaxResults": { @@ -14611,7 +14623,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

      "MaxResults": {
@@ -14611,7 +14623,7 @@
      "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-        "smithy.api#documentation": "(Optional) Lists commands issued against this instance ID.\n \n You can't specify an instance ID in the same command that you specify Status =\n Pending. This is because the command hasn't reached the instance yet.\n"
+        "smithy.api#documentation": "(Optional) Lists commands issued against this managed node ID.\n \n You can't specify a managed node ID in the same command that you specify Status =\n Pending. This is because the command hasn't reached the managed node yet.\n"
      } },
      "MaxResults": {
@@ -15091,7 +15103,7 @@
      "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": {
-        "smithy.api#documentation": "The instance ID for which you want inventory information.",
+        "smithy.api#documentation": "The managed node ID for which you want inventory information.",
                                                                      ", "smithy.api#required": {} } }, @@ -15135,25 +15147,25 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID targeted by the request to query inventory information.

                                                                      " + "smithy.api#documentation": "

                                                                      The managed node ID targeted by the request to query inventory information.

                                                                      " } }, "SchemaVersion": { "target": "com.amazonaws.ssm#InventoryItemSchemaVersion", "traits": { - "smithy.api#documentation": "

                                                                      The inventory schema version used by the instance(s).

                                                                      " + "smithy.api#documentation": "

                                                                      The inventory schema version used by the managed node(s).

                                                                      " } }, "CaptureTime": { "target": "com.amazonaws.ssm#InventoryItemCaptureTime", "traits": { - "smithy.api#documentation": "

                                                                      The time that inventory information was collected for the instance(s).

                                                                      " + "smithy.api#documentation": "

                                                                      The time that inventory information was collected for the managed node(s).

                                                                      " } }, "Entries": { "target": "com.amazonaws.ssm#InventoryItemEntryList", "traits": { - "smithy.api#documentation": "

                                                                      A list of inventory items on the instance(s).

                                                                      " + "smithy.api#documentation": "

                                                                      A list of inventory items on the managed node(s).

                                                                      " } }, "NextToken": { @@ -15443,7 +15455,7 @@ "ResourceComplianceSummaryItems": { "target": "com.amazonaws.ssm#ResourceComplianceSummaryItemList", "traits": { - "smithy.api#documentation": "

      "NextToken": {
@@ -15443,7 +15455,7 @@
      "ResourceComplianceSummaryItems": { "target": "com.amazonaws.ssm#ResourceComplianceSummaryItemList", "traits": {
-        "smithy.api#documentation": "A summary count for specified or targeted managed instances. Summary count includes\n information about compliant and non-compliant State Manager associations, patch status, or custom\n items according to the filter criteria that you specify."
+        "smithy.api#documentation": "A summary count for specified or targeted managed nodes. Summary count includes information\n about compliant and non-compliant State Manager associations, patch status, or custom items\n according to the filter criteria that you specify."
      } },
      "NextToken": {
@@ -15602,7 +15614,7 @@
    } },
    "traits": {
-      "smithy.api#documentation": "Information about an Amazon Simple Storage Service (Amazon S3) bucket to write\n instance-level logs to.\n \n LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the\n OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure.\n For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance\n window task types, see MaintenanceWindowTaskInvocationParameters.\n"
+      "smithy.api#documentation": "Information about an Amazon Simple Storage Service (Amazon S3) bucket to write\n managed node-level logs to.\n \n LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the\n OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure.\n For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance\n window task types, see MaintenanceWindowTaskInvocationParameters.\n"
                                                                      " } }, "com.amazonaws.ssm#Long": { @@ -16256,7 +16268,7 @@ "NotificationConfig": { "target": "com.amazonaws.ssm#NotificationConfig", "traits": { - "smithy.api#documentation": "

                                                                      Configurations for sending notifications about command status changes on a per-instance\n basis.

                                                                      " + "smithy.api#documentation": "

                                                                      Configurations for sending notifications about command status changes on a per-managed node\n basis.

                                                                      " } }, "OutputS3BucketName": { @@ -16378,7 +16390,7 @@ "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": { - "smithy.api#documentation": "

                                                                      The targets, either instances or tags.

                                                                      \n

                                                                      Specify instances using the following format:

                                                                      \n

                                                                      \n Key=instanceids,Values=,\n

                                                                      \n

                                                                      Tags are specified using the following format:

                                                                      \n

                                                                      \n Key=,Values=.

                                                                      " + "smithy.api#documentation": "

                                                                      The targets, either managed nodes or tags.

                                                                      \n

                                                                      Specify managed nodes using the following format:

                                                                      \n

                                                                      \n Key=instanceids,Values=,\n

                                                                      \n

                                                                      Tags are specified using the following format:

                                                                      \n

                                                                      \n Key=,Values=.

                                                                      " } }, "OwnerInformation": { @@ -16450,7 +16462,7 @@ "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": { - "smithy.api#documentation": "

                                                                      The targets (either instances or tags). Instances are specified using\n Key=instanceids,Values=,. Tags are specified\n using Key=,Values=.

                                                                      " + "smithy.api#documentation": "

                                                                      The targets (either managed nodes or tags). Managed nodes are specified using\n Key=instanceids,Values=,. Tags are specified\n using Key=,Values=.

                                                                      " } }, "TaskParameters": { @@ -16954,7 +16966,7 @@ "NotificationType": { "target": "com.amazonaws.ssm#NotificationType", "traits": { - "smithy.api#documentation": "

      "TaskParameters": {
@@ -16954,7 +16966,7 @@
      "NotificationType": { "target": "com.amazonaws.ssm#NotificationType", "traits": {
-        "smithy.api#documentation": "The type of notification.\n Command: Receive notification when the status of a command changes.\n Invocation: For commands sent to multiple instances, receive notification on\n a per-instance basis when the status of a command changes.\n"
+        "smithy.api#documentation": "The type of notification.\n Command: Receive notification when the status of a command changes.\n Invocation: For commands sent to multiple managed nodes, receive notification\n on a per-node basis when the status of a command changes.\n"
      } }
    }
  },

The Advisory ID of the patch. For example, RHSA-2020:3779. Applies to\n Linux-based instances only."
+          "smithy.api#documentation": "The Advisory ID of the patch. For example, RHSA-2020:3779. Applies to\n Linux-based managed nodes only."
        }
      },
      "BugzillaIds": {
        "target": "com.amazonaws.ssm#PatchBugzillaIdList",
        "traits": {
-          "smithy.api#documentation": "The Bugzilla ID of the patch. For example, 1600646. Applies to Linux-based\n instances only."
+          "smithy.api#documentation": "The Bugzilla ID of the patch. For example, 1600646. Applies to Linux-based\n managed nodes only."
        }
      },
      "CVEIds": {
        "target": "com.amazonaws.ssm#PatchCVEIdList",
        "traits": {
-          "smithy.api#documentation": "The Common Vulnerabilities and Exposures (CVE) ID of the patch. For example,\n CVE-2011-3192. Applies to Linux-based instances only."
+          "smithy.api#documentation": "The Common Vulnerabilities and Exposures (CVE) ID of the patch. For example,\n CVE-2011-3192. Applies to Linux-based managed nodes only."
        }
      },
      "Name": {
        "target": "com.amazonaws.ssm#PatchName",
        "traits": {
-          "smithy.api#documentation": "The name of the patch. Applies to Linux-based instances only."
+          "smithy.api#documentation": "The name of the patch. Applies to Linux-based managed nodes only."
        }
      },
      "Epoch": {
        "target": "com.amazonaws.ssm#PatchEpoch",
        "traits": {
-          "smithy.api#documentation": "The epoch of the patch. For example in\n pkg-example-EE-20180914-2.2.amzn1.noarch, the epoch value is\n 20180914-2. Applies to Linux-based instances only."
+          "smithy.api#documentation": "The epoch of the patch. For example in\n pkg-example-EE-20180914-2.2.amzn1.noarch, the epoch value is\n 20180914-2. Applies to Linux-based managed nodes only."
        }
      },
      "Version": {
        "target": "com.amazonaws.ssm#PatchVersion",
        "traits": {
-          "smithy.api#documentation": "The version number of the patch. For example, in\n example-pkg-1.710.10-2.7.abcd.x86_64, the version number is indicated by\n -1. Applies to Linux-based instances only."
+          "smithy.api#documentation": "The version number of the patch. For example, in\n example-pkg-1.710.10-2.7.abcd.x86_64, the version number is indicated by\n -1. Applies to Linux-based managed nodes only."
        }
      },
      "Release": {
        "target": "com.amazonaws.ssm#PatchRelease",
        "traits": {
-          "smithy.api#documentation": "The particular release of a patch. For example, in\n pkg-example-EE-20180914-2.2.amzn1.noarch, the release is 2.amaz1.\n Applies to Linux-based instances only."
+          "smithy.api#documentation": "The particular release of a patch. For example, in\n pkg-example-EE-20180914-2.2.amzn1.noarch, the release is 2.amaz1.\n Applies to Linux-based managed nodes only."
        }
      },
      "Arch": {
        "target": "com.amazonaws.ssm#PatchArch",
        "traits": {
-          "smithy.api#documentation": "The architecture of the patch. For example, in\n example-pkg-0.710.10-2.7.abcd.x86_64, the architecture is indicated by\n x86_64. Applies to Linux-based instances only."
+          "smithy.api#documentation": "The architecture of the patch. For example, in\n example-pkg-0.710.10-2.7.abcd.x86_64, the architecture is indicated by\n x86_64. Applies to Linux-based managed nodes only."
        }
      },
      "Severity": {
@@ -19631,7 +19643,7 @@
      "Repository": {
        "target": "com.amazonaws.ssm#PatchRepository",
        "traits": {
-          "smithy.api#documentation": "The source patch repository for the operating system and version, such as\n trusty-security for Ubuntu Server 14.04 LTE and focal-security for\n Ubuntu Server 20.04 LTE. Applies to Linux-based instances only."
+          "smithy.api#documentation": "The source patch repository for the operating system and version, such as\n trusty-security for Ubuntu Server 14.04 LTE and focal-security for\n Ubuntu Server 20.04 LTE. Applies to Linux-based managed nodes only."
        }
      }
    },

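A minimal TypeScript sketch, not part of this patch, of how the Linux-only Patch metadata fields above (AdvisoryIds, BugzillaIds, CVEIds, Epoch, Version, Release, Arch, Repository) surface through the regenerated client; the region, filter key, and product value are illustrative assumptions:

```ts
import { SSMClient, DescribeAvailablePatchesCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

// List available patches for an Amazon Linux 2 product and print the
// Linux-only metadata fields documented in this model.
const { Patches } = await client.send(
  new DescribeAvailablePatchesCommand({
    Filters: [{ Key: "PRODUCT", Values: ["AmazonLinux2.0"] }],
  })
);

for (const patch of Patches ?? []) {
  console.log(patch.Name, patch.Epoch, patch.Version, patch.Release, patch.Arch);
  console.log("  advisories:", patch.AdvisoryIds, "CVEs:", patch.CVEIds);
}
```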
@@ -19777,14 +19789,14 @@
      "State": {
        "target": "com.amazonaws.ssm#PatchComplianceDataState",
        "traits": {
-          "smithy.api#documentation": "The state of the patch on the instance, such as INSTALLED or FAILED.\n For descriptions of each patch state, see About patch compliance in the Amazon Web Services Systems Manager User Guide.",
+          "smithy.api#documentation": "The state of the patch on the managed node, such as INSTALLED or FAILED.\n For descriptions of each patch state, see About patch compliance in the Amazon Web Services Systems Manager User Guide.",
          "smithy.api#required": {}
        }
      },
      "InstalledTime": {
        "target": "com.amazonaws.ssm#DateTime",
        "traits": {
-          "smithy.api#documentation": "The date/time the patch was installed on the instance. Not all operating systems provide\n this level of information.",
+          "smithy.api#documentation": "The date/time the patch was installed on the managed node. Not all operating systems provide\n this level of information.",
          "smithy.api#required": {}
        }
      },
@@ -19796,7 +19808,7 @@
      }
    },
    "traits": {
-      "smithy.api#documentation": "Information about the state of a patch on a particular instance as it relates to the patch\n baseline used to patch the instance."
+      "smithy.api#documentation": "Information about the state of a patch on a particular managed node as it relates to the patch\n baseline used to patch the node."
    }
  },

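The State and InstalledTime members above come back on each PatchComplianceData record. A minimal TypeScript sketch, not part of this patch, assuming DescribeInstancePatchesCommand and a placeholder node ID:

```ts
import { SSMClient, DescribeInstancePatchesCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

// Each entry in Patches is a PatchComplianceData record with Title,
// Classification, Severity, State (e.g. INSTALLED or FAILED), and InstalledTime.
const { Patches } = await client.send(
  new DescribeInstancePatchesCommand({ InstanceId: "i-02573cafcfEXAMPLE" })
);

for (const patch of Patches ?? []) {
  console.log(`${patch.Title}: ${patch.State} (installed ${patch.InstalledTime})`);
}
```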
  "com.amazonaws.ssm#PatchComplianceDataList": {
@@ -20338,7 +20350,7 @@
        "target": "com.amazonaws.ssm#Boolean",
        "traits": {
          "smithy.api#box": {},
-          "smithy.api#documentation": "For instances identified by the approval rule filters, enables a patch baseline to apply\n non-security updates available in the specified repository. The default value is\n false. Applies to Linux instances only."
+          "smithy.api#documentation": "For managed nodes identified by the approval rule filters, enables a patch baseline to apply\n non-security updates available in the specified repository. The default value is\n false. Applies to Linux managed nodes only."
        }
      }
    },
@@ -20420,7 +20432,7 @@
      }
    },
    "traits": {
-      "smithy.api#documentation": "Information about the patches to use to update the instances, including target operating\n systems and source repository. Applies to Linux instances only."
+      "smithy.api#documentation": "Information about the patches to use to update the managed nodes, including target operating\n systems and source repository. Applies to Linux managed nodes only."
    }
  },

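Both the EnableNonSecurity flag and the PatchSource structure documented above are supplied when a patch baseline is created. A minimal TypeScript sketch, not part of this patch; the baseline name, repository URL, and filter values are placeholders:

```ts
import { SSMClient, CreatePatchBaselineCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

await client.send(
  new CreatePatchBaselineCommand({
    Name: "AL2-with-custom-repo",
    OperatingSystem: "AMAZON_LINUX_2",
    ApprovalRules: {
      PatchRules: [
        {
          PatchFilterGroup: {
            PatchFilters: [{ Key: "CLASSIFICATION", Values: ["Security"] }],
          },
          ApproveAfterDays: 7,
          // Also apply non-security updates from the repositories listed in Sources.
          EnableNonSecurity: true,
        },
      ],
    },
    // PatchSource: an alternative repository for Linux managed nodes.
    Sources: [
      {
        Name: "My-AL2-extras",
        Products: ["AmazonLinux2.0"],
        Configuration: "[my-extras]\nname=MyExtras\nbaseurl=https://example.com/repo\nenabled=1",
      },
    ],
  })
);
```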
  "com.amazonaws.ssm#PatchSourceConfiguration": {
@@ -20549,6 +20561,10 @@
        {
          "value": "Linux",
          "name": "LINUX"
+        },
+        {
+          "value": "MacOS",
+          "name": "MACOS"
        }
      ]
    }
@@ -20651,7 +20667,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Registers a compliance type and other compliance details on a designated resource. This\n operation lets you register custom compliance details with a resource. This call overwrites\n existing compliance information on the resource, so you must provide a full list of compliance\n items each time that you send the request.\n ComplianceType can be one of the following:\n • ExecutionId: The execution ID when the patch, association, or custom compliance item was\n applied.\n • ExecutionType: Specify patch, association, or Custom:string.\n • ExecutionTime. The time the patch, association, or custom compliance item was applied to\n the instance.\n • Id: The patch, association, or custom compliance ID.\n • Title: A title.\n • Status: The status of the compliance item. For example, approved for patches,\n or Failed for associations.\n • Severity: A patch severity. For example, critical.\n • DocumentName: An SSM document name. For example, AWS-RunPatchBaseline.\n • DocumentVersion: An SSM document version number. For example, 4.\n • Classification: A patch classification. For example, security updates.\n • PatchBaselineId: A patch baseline ID.\n • PatchSeverity: A patch severity. For example, Critical.\n • PatchState: A patch state. For example, InstancesWithFailedPatches.\n • PatchGroup: The name of a patch group.\n • InstalledTime: The time the association, patch, or custom compliance item was applied to\n the resource. Specify the time by using the following format: yyyy-MM-dd'T'HH:mm:ss'Z'"
+      "smithy.api#documentation": "Registers a compliance type and other compliance details on a designated resource. This\n operation lets you register custom compliance details with a resource. This call overwrites\n existing compliance information on the resource, so you must provide a full list of compliance\n items each time that you send the request.\n ComplianceType can be one of the following:\n • ExecutionId: The execution ID when the patch, association, or custom compliance item was\n applied.\n • ExecutionType: Specify patch, association, or Custom:string.\n • ExecutionTime. The time the patch, association, or custom compliance item was applied to\n the managed node.\n • Id: The patch, association, or custom compliance ID.\n • Title: A title.\n • Status: The status of the compliance item. For example, approved for patches,\n or Failed for associations.\n • Severity: A patch severity. For example, critical.\n • DocumentName: An SSM document name. For example, AWS-RunPatchBaseline.\n • DocumentVersion: An SSM document version number. For example, 4.\n • Classification: A patch classification. For example, security updates.\n • PatchBaselineId: A patch baseline ID.\n • PatchSeverity: A patch severity. For example, Critical.\n • PatchState: A patch state. For example, InstancesWithFailedPatches.\n • PatchGroup: The name of a patch group.\n • InstalledTime: The time the association, patch, or custom compliance item was applied to\n the resource. Specify the time by using the following format: yyyy-MM-dd'T'HH:mm:ss'Z'"
    }
  },

  "com.amazonaws.ssm#PutComplianceItemsRequest": {
@@ -20660,7 +20676,7 @@
      "ResourceId": {
        "target": "com.amazonaws.ssm#ComplianceResourceId",
        "traits": {
-          "smithy.api#documentation": "Specify an ID for this resource. For a managed instance, this is the instance ID.",
+          "smithy.api#documentation": "Specify an ID for this resource. For a managed node, this is the node ID.",
          "smithy.api#required": {}
        }
      },

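A minimal TypeScript sketch of registering a custom compliance item against a managed node, not part of this patch; the node ID, compliance type, and item details are placeholders:

```ts
import { SSMClient, PutComplianceItemsCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

await client.send(
  new PutComplianceItemsCommand({
    // For a managed node, ResourceId is the node ID (i-... or mi-...).
    ResourceId: "mi-1a2b3c4d5e6f",
    ResourceType: "ManagedInstance",
    ComplianceType: "Custom:AntivirusCheck",
    ExecutionSummary: {
      ExecutionTime: new Date(),
      ExecutionType: "Command",
    },
    Items: [
      {
        Id: "signature-version",
        Title: "Antivirus signatures up to date",
        Severity: "HIGH",
        Status: "COMPLIANT",
      },
    ],
  })
);
```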
@@ -20757,7 +20773,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Bulk update custom inventory items on one more instance. The request adds an inventory item,\n if it doesn't already exist, or updates an inventory item, if it does exist."
+      "smithy.api#documentation": "Bulk update custom inventory items on one or more managed nodes. The request adds an inventory item,\n if it doesn't already exist, or updates an inventory item, if it does exist."
    }
  },
  "com.amazonaws.ssm#PutInventoryMessage": {
@@ -20769,14 +20785,14 @@
      "InstanceId": {
        "target": "com.amazonaws.ssm#InstanceId",
        "traits": {
-          "smithy.api#documentation": "An instance ID where you want to add or update inventory items.",
+          "smithy.api#documentation": "An managed node ID where you want to add or update inventory items.",
          "smithy.api#required": {}
        }
      },
      "Items": {
        "target": "com.amazonaws.ssm#InventoryItemList",
        "traits": {
-          "smithy.api#documentation": "The inventory items that you want to add or update on instances.",
+          "smithy.api#documentation": "The inventory items that you want to add or update on managed nodes.",
          "smithy.api#required": {}
        }
      }

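A minimal TypeScript sketch of PutInventory against a single managed node, not part of this patch; the node ID, custom type name, and content values are placeholders:

```ts
import { SSMClient, PutInventoryCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

await client.send(
  new PutInventoryCommand({
    InstanceId: "mi-1a2b3c4d5e6f",
    Items: [
      {
        TypeName: "Custom:RackLocation",
        SchemaVersion: "1.0",
        CaptureTime: "2021-11-30T00:00:00Z",
        // Adds the item if it doesn't exist, updates it if it does.
        Content: [{ RackId: "r-042", Row: "7" }],
      },
    ],
  })
);
```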
@@ -21130,7 +21146,7 @@
      "Targets": {
        "target": "com.amazonaws.ssm#Targets",
        "traits": {
-          "smithy.api#documentation": "The targets to register with the maintenance window. In other words, the instances to run\n commands on when the maintenance window runs.\n If a single maintenance window task is registered with multiple targets, its task\n invocations occur sequentially and not in parallel. If your task must run on multiple targets at\n the same time, register a task for each target individually and assign each task the same\n priority level.\n You can specify targets using instance IDs, resource group names, or tags that have been\n applied to instances.\n Example 1: Specify instance IDs\n Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3\n Example 2: Use tag key-pairs applied to instances\n Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2\n Example 3: Use tag-keys applied to instances\n Key=tag-key,Values=my-tag-key-1,my-tag-key-2\n Example 4: Use resource group names\n Key=resource-groups:Name,Values=resource-group-name\n Example 5: Use filters for resource group types\n Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2\n For Key=resource-groups:ResourceTypeFilters, specify resource types in the\n following format\n Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC\n For more information about these examples formats, including the best use case for each one,\n see Examples: Register\n targets with a maintenance window in the Amazon Web Services Systems Manager User Guide.",
+          "smithy.api#documentation": "The targets to register with the maintenance window. In other words, the managed nodes to\n run commands on when the maintenance window runs.\n If a single maintenance window task is registered with multiple targets, its task\n invocations occur sequentially and not in parallel. If your task must run on multiple targets at\n the same time, register a task for each target individually and assign each task the same\n priority level.\n You can specify targets using managed node IDs, resource group names, or tags that have been\n applied to managed nodes.\n Example 1: Specify managed node IDs\n Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>,<instance-id-3>\n Example 2: Use tag key-pairs applied to managed\n nodes\n Key=tag:<my-tag-key>,Values=<my-tag-value-1>,<my-tag-value-2>\n Example 3: Use tag-keys applied to managed nodes\n Key=tag-key,Values=<my-tag-key-1>,<my-tag-key-2>\n Example 4: Use resource group names\n Key=resource-groups:Name,Values=<resource-group-name>\n Example 5: Use filters for resource group types\n Key=resource-groups:ResourceTypeFilters,Values=<resource-type-1>,<resource-type-2>\n For Key=resource-groups:ResourceTypeFilters, specify resource types in the\n following format\n Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC\n For more information about these examples formats, including the best use case for each one,\n see Examples: Register\n targets with a maintenance window in the Amazon Web Services Systems Manager User Guide.",
          "smithy.api#required": {}
        }
      },

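A minimal TypeScript sketch of Example 2 above (registering managed nodes with a maintenance window by tag), not part of this patch; the window ID, tag key, and tag value are placeholders:

```ts
import {
  SSMClient,
  RegisterTargetWithMaintenanceWindowCommand,
} from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

const { WindowTargetId } = await client.send(
  new RegisterTargetWithMaintenanceWindowCommand({
    WindowId: "mw-0c50858d01EXAMPLE",
    ResourceType: "INSTANCE",
    // Example 2: target managed nodes by a tag key/value pair.
    Targets: [{ Key: "tag:PatchGroup", Values: ["web-servers"] }],
    Name: "WebServerNodes",
  })
);

console.log("Registered window target:", WindowTargetId);
```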
@@ -21214,7 +21230,7 @@
      "Targets": {
        "target": "com.amazonaws.ssm#Targets",
        "traits": {
-          "smithy.api#documentation": "The targets (either instances or maintenance window targets).\n One or more targets must be specified for maintenance window Run Command-type tasks.\n Depending on the task, targets are optional for other maintenance window task types (Automation,\n Lambda, and Step Functions). For more information about running tasks\n that don't specify targets, see Registering\n maintenance window tasks without targets in the\n Amazon Web Services Systems Manager User Guide.\n Specify instances using the following format:\n Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>\n Specify maintenance window targets using the following format:\n Key=WindowTargetIds,Values=<window-target-id-1>,<window-target-id-2>"
+          "smithy.api#documentation": "The targets (either managed nodes or maintenance window targets).\n One or more targets must be specified for maintenance window Run Command-type tasks.\n Depending on the task, targets are optional for other maintenance window task types (Automation,\n Lambda, and Step Functions). For more information about running tasks\n that don't specify targets, see Registering\n maintenance window tasks without targets in the\n Amazon Web Services Systems Manager User Guide.\n Specify managed nodes using the following format:\n Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>\n Specify maintenance window targets using the following format:\n Key=WindowTargetIds,Values=<window-target-id-1>,<window-target-id-2>"
        }
      },

  "TaskArn": {
@@ -21271,7 +21287,7 @@
      "LoggingInfo": {
        "target": "com.amazonaws.ssm#LoggingInfo",
        "traits": {
-          "smithy.api#documentation": "A structure containing information about an Amazon Simple Storage Service (Amazon S3) bucket\n to write instance-level logs to.\n LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the\n OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure.\n For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance\n window task types, see MaintenanceWindowTaskInvocationParameters."
+          "smithy.api#documentation": "A structure containing information about an Amazon Simple Storage Service (Amazon S3) bucket\n to write managed node-level logs to.\n LoggingInfo has been deprecated. To specify an Amazon Simple Storage Service (Amazon S3) bucket to contain logs, instead use the\n OutputS3BucketName and OutputS3KeyPrefix options in the TaskInvocationParameters structure.\n For information about how Amazon Web Services Systems Manager handles these options for the supported maintenance\n window task types, see MaintenanceWindowTaskInvocationParameters."
        }
      },

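A minimal TypeScript sketch of registering a Run Command task against a previously registered window target, sending node-level output to S3 through TaskInvocationParameters rather than the deprecated LoggingInfo, not part of this patch; IDs, bucket name, and parameter values are placeholders:

```ts
import {
  SSMClient,
  RegisterTaskWithMaintenanceWindowCommand,
} from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

await client.send(
  new RegisterTaskWithMaintenanceWindowCommand({
    WindowId: "mw-0c50858d01EXAMPLE",
    TaskType: "RUN_COMMAND",
    TaskArn: "AWS-RunPatchBaseline",
    // Key=WindowTargetIds,Values=<window-target-id>
    Targets: [{ Key: "WindowTargetIds", Values: ["e32eecb2-646c-4f4b-8ed1-EXAMPLE"] }],
    MaxConcurrency: "10%",
    MaxErrors: "1",
    Priority: 1,
    TaskInvocationParameters: {
      RunCommand: {
        Parameters: { Operation: ["Install"] },
        // Node-level log output goes here instead of the deprecated LoggingInfo.
        OutputS3BucketName: "my-patching-logs",
        OutputS3KeyPrefix: "maintenance-window",
      },
    },
  })
);
```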
  "Name": {
@@ -21434,14 +21450,14 @@
      "ResourceType": {
        "target": "com.amazonaws.ssm#ResourceTypeForTagging",
        "traits": {
-          "smithy.api#documentation": "The type of resource from which you want to remove a tag.\n The ManagedInstance type for this API operation is only for on-premises\n managed instances. Specify the name of the managed instance in the following format:\n mi-ID_number\n . For example,\n mi-1a2b3c4d5e6f.",
+          "smithy.api#documentation": "The type of resource from which you want to remove a tag.\n The ManagedInstance type for this API operation is only for on-premises\n managed nodes. Specify the name of the managed node in the following format:\n mi-ID_number\n . For example,\n mi-1a2b3c4d5e6f.",
          "smithy.api#required": {}
        }
      },
      "ResourceId": {
        "target": "com.amazonaws.ssm#ResourceId",
        "traits": {
-          "smithy.api#documentation": "The ID of the resource from which you want to remove tags. For example:\n ManagedInstance: mi-012345abcde\n MaintenanceWindow: mw-012345abcde\n PatchBaseline: pb-012345abcde\n OpsMetadata object: ResourceID for tagging is created from the Amazon Resource\n Name (ARN) for the object. Specifically, ResourceID is created from the strings that\n come after the word opsmetadata in the ARN. For example, an OpsMetadata object with\n an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager\n has a ResourceID of either aws/ssm/MyGroup/appmanager or\n /aws/ssm/MyGroup/appmanager.\n For the Document and Parameter values, use the name of the resource.\n The ManagedInstance type for this API operation is only for on-premises managed instances.\n Specify the name of the managed instance in the following format: mi-ID_number. For example,\n mi-1a2b3c4d5e6f.",
+          "smithy.api#documentation": "The ID of the resource from which you want to remove tags. For example:\n ManagedInstance: mi-012345abcde\n MaintenanceWindow: mw-012345abcde\n PatchBaseline: pb-012345abcde\n OpsMetadata object: ResourceID for tagging is created from the Amazon Resource\n Name (ARN) for the object. Specifically, ResourceID is created from the strings that\n come after the word opsmetadata in the ARN. For example, an OpsMetadata object with\n an ARN of arn:aws:ssm:us-east-2:1234567890:opsmetadata/aws/ssm/MyGroup/appmanager\n has a ResourceID of either aws/ssm/MyGroup/appmanager or\n /aws/ssm/MyGroup/appmanager.\n For the Document and Parameter values, use the name of the resource.\n The ManagedInstance type for this API operation is only for on-premises\n managed nodes. Specify the name of the managed node in the following format: mi-ID_number. For\n example, mi-1a2b3c4d5e6f.",
          "smithy.api#required": {}
        }
      },

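A minimal TypeScript sketch of removing a tag from an on-premises managed node, not part of this patch; the mi-... ID and tag key are placeholders:

```ts
import { SSMClient, RemoveTagsFromResourceCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

await client.send(
  new RemoveTagsFromResourceCommand({
    // ManagedInstance is only used for on-premises managed nodes (mi-... IDs).
    ResourceType: "ManagedInstance",
    ResourceId: "mi-1a2b3c4d5e6f",
    TagKeys: ["Environment"],
  })
);
```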
@@ -22231,7 +22247,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Reconnects a session to an instance after it has been disconnected. Connections can be\n resumed for disconnected sessions, but not terminated sessions.\n This command is primarily for use by client machines to automatically reconnect during\n intermittent network issues. It isn't intended for any other use."
+      "smithy.api#documentation": "Reconnects a session to a managed node after it has been disconnected. Connections can be\n resumed for disconnected sessions, but not terminated sessions.\n This command is primarily for use by client machines to automatically reconnect during\n intermittent network issues. It isn't intended for any other use."
    }
  },
  "com.amazonaws.ssm#ResumeSessionRequest": {
@@ -22258,13 +22274,13 @@
      "TokenValue": {
        "target": "com.amazonaws.ssm#TokenValue",
        "traits": {
-          "smithy.api#documentation": "An encrypted token value containing session and caller information. Used to authenticate the\n connection to the instance."
+          "smithy.api#documentation": "An encrypted token value containing session and caller information. Used to authenticate the\n connection to the managed node."
        }
      },
      "StreamUrl": {
        "target": "com.amazonaws.ssm#StreamUrl",
        "traits": {
-          "smithy.api#documentation": "A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and\n receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).\n region represents the Region identifier for an\n\t\t\t\t\t\tAmazon Web Services Region supported by Amazon Web Services Systems Manager, such as us-east-2 for the US East (Ohio) Region.\n\t\t\t\t\t\tFor a list of supported region values, see the Region column in Systems Manager service endpoints in the\n Amazon Web Services General Reference.\n session-id represents the ID of a Session Manager session, such as\n 1a2b3c4dEXAMPLE."
+          "smithy.api#documentation": "A URL back to SSM Agent on the managed node that the Session Manager client uses to send commands and\n receive output from the managed node. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output).\n region represents the Region identifier for an\n\t\t\t\t\t\tAmazon Web Services Region supported by Amazon Web Services Systems Manager, such as us-east-2 for the US East (Ohio) Region.\n\t\t\t\t\t\tFor a list of supported region values, see the Region column in Systems Manager service endpoints in the\n Amazon Web Services General Reference.\n session-id represents the ID of a Session Manager session, such as\n 1a2b3c4dEXAMPLE."
        }
      }
    }

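A minimal TypeScript sketch of resuming a disconnected session and reading back the TokenValue and StreamUrl described above, not part of this patch; the session ID is a placeholder, and the returned values are normally consumed by the Session Manager client rather than application code:

```ts
import { SSMClient, ResumeSessionCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

const { SessionId, TokenValue, StreamUrl } = await client.send(
  new ResumeSessionCommand({ SessionId: "alice-0123456789abcdefa" })
);

// StreamUrl is the wss://ssmmessages... data channel back to SSM Agent on the
// managed node; TokenValue authenticates that connection.
console.log(SessionId, StreamUrl, TokenValue ? "(token received)" : "(no token)");
```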
@@ -22618,7 +22634,7 @@
      }
    ],
    "traits": {
-      "smithy.api#documentation": "Runs commands on one or more managed instances."
+      "smithy.api#documentation": "Runs commands on one or more managed nodes."
    }
  },
  "com.amazonaws.ssm#SendCommandRequest": {
@@ -22627,13 +22643,13 @@
      "InstanceIds": {
        "target": "com.amazonaws.ssm#InstanceIdList",
        "traits": {
-          "smithy.api#documentation": "The IDs of the instances where the command should run. Specifying instance IDs is most\n useful when you are targeting a limited number of instances, though you can specify up to 50\n IDs.\n To target a larger number of instances, or if you prefer not to list individual instance\n IDs, we recommend using the Targets option instead. Using Targets,\n which accepts tag key-value pairs to identify the instances to send commands to, you can a send\n command to tens, hundreds, or thousands of instances at once.\n For more information about how to use targets, see Using targets and rate\n controls to send commands to a fleet in the\n Amazon Web Services Systems Manager User Guide."
+          "smithy.api#documentation": "The IDs of the managed nodes where the command should run. Specifying managed node IDs is most\n useful when you are targeting a limited number of managed nodes, though you can specify up to 50\n IDs.\n To target a larger number of managed nodes, or if you prefer not to list individual node\n IDs, we recommend using the Targets option instead. Using Targets,\n which accepts tag key-value pairs to identify the managed nodes to send commands to, you can a\n send command to tens, hundreds, or thousands of nodes at once.\n For more information about how to use targets, see Using targets and rate\n controls to send commands to a fleet in the\n Amazon Web Services Systems Manager User Guide."
        }
      },
      "Targets": {
        "target": "com.amazonaws.ssm#Targets",
        "traits": {
-          "smithy.api#documentation": "An array of search criteria that targets instances using a Key,Value\n combination that you specify. Specifying targets is most useful when you want to send a command\n to a large number of instances at once. Using Targets, which accepts tag key-value\n pairs to identify instances, you can send a command to tens, hundreds, or thousands of instances\n at once.\n To send a command to a smaller number of instances, you can use the InstanceIds\n option instead.\n For more information about how to use targets, see Sending commands to a\n fleet in the Amazon Web Services Systems Manager User Guide."
+          "smithy.api#documentation": "An array of search criteria that targets managed nodes using a Key,Value\n combination that you specify. Specifying targets is most useful when you want to send a command\n to a large number of managed nodes at once. Using Targets, which accepts tag\n key-value pairs to identify managed nodes, you can send a command to tens, hundreds, or thousands\n of nodes at once.\n To send a command to a smaller number of managed nodes, you can use the\n InstanceIds option instead.\n For more information about how to use targets, see Sending commands to a\n fleet in the Amazon Web Services Systems Manager User Guide."
        }
      },
      "DocumentName": {
@@ -22701,7 +22717,7 @@
      "MaxConcurrency": {
        "target": "com.amazonaws.ssm#MaxConcurrency",
        "traits": {
-          "smithy.api#documentation": "(Optional) The maximum number of instances that are allowed to run the command at the same\n time. You can specify a number such as 10 or a percentage such as 10%. The default value is\n 50. For more information about how to use MaxConcurrency, see Using\n concurrency controls in the Amazon Web Services Systems Manager User Guide."
+          "smithy.api#documentation": "(Optional) The maximum number of managed nodes that are allowed to run the command at the\n same time. You can specify a number such as 10 or a percentage such as 10%. The default value is\n 50. For more information about how to use MaxConcurrency, see Using\n concurrency controls in the Amazon Web Services Systems Manager User Guide."
        }
      },

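A minimal TypeScript sketch of sending a command to managed nodes selected by tag (the Targets option above) with a MaxConcurrency cap, not part of this patch; the tag key/value, document parameters, and region are placeholders:

```ts
import { SSMClient, SendCommandCommand } from "@aws-sdk/client-ssm";

const client = new SSMClient({ region: "us-east-2" });

const { Command } = await client.send(
  new SendCommandCommand({
    DocumentName: "AWS-RunShellScript",
    // Target by tag instead of listing individual node IDs in InstanceIds.
    Targets: [{ Key: "tag:Environment", Values: ["staging"] }],
    Parameters: { commands: ["uptime"] },
    MaxConcurrency: "10%",
    MaxErrors: "1",
    Comment: "Fleet-wide uptime check",
  })
);

console.log("Command ID:", Command?.CommandId);
```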
  "MaxErrors": {
@@ -22834,7 +22850,7 @@
      "Target": {
        "target": "com.amazonaws.ssm#SessionTarget",
        "traits": {
-          "smithy.api#documentation": "The instance that the Session Manager session connected to."
+          "smithy.api#documentation": "The managed node that the Session Manager session connected to."
        }
      },
      "Status": {
@@ -22893,7 +22909,7 @@
      }
    },
    "traits": {
-      "smithy.api#documentation": "Information about a Session Manager connection to an instance."
+      "smithy.api#documentation": "Information about a Session Manager connection to a managed node."
    }
  },
  "com.amazonaws.ssm#SessionDetails": {
@@ -22918,7 +22934,7 @@
      "value": {
        "target": "com.amazonaws.ssm#SessionFilterValue",
        "traits": {
-          "smithy.api#documentation": "The filter value. Valid values for each filter key are as follows:\n • InvokedAfter: Specify a timestamp to limit your results. For example, specify\n 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.\n • InvokedBefore: Specify a timestamp to limit your results. For example, specify\n 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.\n • Target: Specify an instance to which session connections have been made.\n • Owner: Specify an Amazon Web Services user account to see a list of sessions started by that\n user.\n • Status: Specify a valid session status to see a list of all sessions with that status.\n Status values you can specify include:\n • Connected\n • Connecting\n • Disconnected\n • Terminated\n • Terminating\n • Failed\n • SessionId: Specify a session ID to return details about the session.",
+          "smithy.api#documentation": "The filter value. Valid values for each filter key are as follows:\n • InvokedAfter: Specify a timestamp to limit your results. For example, specify\n 2018-08-29T00:00:00Z to see sessions that started August 29, 2018, and later.\n • InvokedBefore: Specify a timestamp to limit your results. For example, specify\n 2018-08-29T00:00:00Z to see sessions that started before August 29, 2018.\n • Target: Specify a managed node to which session connections have been made.\n • Owner: Specify an Amazon Web Services user account to see a list of sessions started by that\n user.\n •

                                                                        Status: Specify a valid session status to see a list of all sessions with that status.\n Status values you can specify include:

                                                                        \n
                                                                          \n
                                                                        • \n

                                                                          Connected

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Connecting

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Disconnected

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Terminated

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Terminating

                                                                          \n
                                                                        • \n
                                                                        • \n

                                                                          Failed

                                                                          \n
                                                                        • \n
                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        SessionId: Specify a session ID to return details about the session.

                                                                        \n
                                                                      • \n
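As a quick illustration of how these filter keys and values are supplied through the generated @aws-sdk/client-ssm client, here is a minimal sketch (the region, the chosen filters, and their values are illustrative assumptions, not part of the model; member names follow the lowercase key/value shape shown above):

import { SSMClient, DescribeSessionsCommand } from "@aws-sdk/client-ssm";

// Minimal sketch: list terminated Session Manager sessions that started after a given time.
// The region and filter values below are placeholder assumptions.
async function listTerminatedSessions(): Promise<void> {
  const ssm = new SSMClient({ region: "us-east-2" });
  const response = await ssm.send(
    new DescribeSessionsCommand({
      State: "History",
      Filters: [
        { key: "Status", value: "Terminated" },
        { key: "InvokedAfter", value: "2018-08-29T00:00:00Z" },
      ],
    })
  );
  console.log(response.Sessions);
}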
                                                                      ", "smithy.api#required": {} } } @@ -23189,7 +23205,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      The number of managed instances found for each patch severity level defined in the request\n filter.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of managed nodes found for each patch severity level defined in the request\n filter.

                                                                      " } }, "com.amazonaws.ssm#SharedDocumentVersion": { @@ -23243,6 +23259,35 @@ "smithy.api#pattern": "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" } }, + "com.amazonaws.ssm#SourceId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 128 + }, + "smithy.api#pattern": "^[a-zA-Z0-9:_-]*$" + } + }, + "com.amazonaws.ssm#SourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS::EC2::Instance", + "name": "AWS_EC2_INSTANCE" + }, + { + "value": "AWS::IoT::Thing", + "name": "AWS_IOT_THING" + }, + { + "value": "AWS::SSM::ManagedInstance", + "name": "AWS_SSM_MANAGEDINSTANCE" + } + ] + } + }, "com.amazonaws.ssm#StandardErrorContent": { "type": "string", "traits": { @@ -23560,7 +23605,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Initiates a connection to a target (for example, an instance) for a Session Manager session. Returns a\n URL and token that can be used to open a WebSocket connection for sending input and receiving\n outputs.

                                                                      \n \n

                                                                      Amazon Web Services CLI usage: start-session is an interactive command that requires the Session Manager\n plugin to be installed on the client machine making the call. For information, see Install\n the Session Manager plugin for the Amazon Web Services CLI in the Amazon Web Services Systems Manager User Guide.

                                                                      \n

                                                                      Amazon Web Services Tools for PowerShell usage: Start-SSMSession isn't currently supported by Amazon Web Services Tools\n for PowerShell on Windows local machines.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      Initiates a connection to a target (for example, a managed node) for a Session Manager session. Returns a\n URL and token that can be used to open a WebSocket connection for sending input and receiving\n outputs.

                                                                      \n \n

                                                                      Amazon Web Services CLI usage: start-session is an interactive command that requires the Session Manager\n plugin to be installed on the client machine making the call. For information, see Install\n the Session Manager plugin for the Amazon Web Services CLI in the Amazon Web Services Systems Manager User Guide.

                                                                      \n

                                                                      Amazon Web Services Tools for PowerShell usage: Start-SSMSession isn't currently supported by Amazon Web Services Tools\n for PowerShell on Windows local machines.

                                                                      \n
                                                                      " } }, "com.amazonaws.ssm#StartSessionRequest": { @@ -23569,14 +23614,14 @@ "Target": { "target": "com.amazonaws.ssm#SessionTarget", "traits": { - "smithy.api#documentation": "

                                                                      The instance to connect to for the session.

                                                                      ", + "smithy.api#documentation": "

                                                                      The managed node to connect to for the session.

                                                                      ", "smithy.api#required": {} } }, "DocumentName": { "target": "com.amazonaws.ssm#DocumentARN", "traits": { - "smithy.api#documentation": "

                                                                      The name of the SSM document to define the parameters and plugin settings for the session.\n For example, SSM-SessionManagerRunShell. You can call the GetDocument API to verify the document exists before attempting to start a session.\n If no document name is provided, a shell to the instance is launched by default.

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the SSM document to define the parameters and plugin settings for the session.\n For example, SSM-SessionManagerRunShell. You can call the GetDocument API to verify the document exists before attempting to start a session.\n If no document name is provided, a shell to the managed node is launched by default.

                                                                      " } }, "Reason": { @@ -23605,13 +23650,13 @@ "TokenValue": { "target": "com.amazonaws.ssm#TokenValue", "traits": { - "smithy.api#documentation": "

                                                                      An encrypted token value containing session and caller information. Used to authenticate the\n connection to the instance.

                                                                      " + "smithy.api#documentation": "

                                                                      An encrypted token value containing session and caller information. Used to authenticate the\n connection to the managed node.

                                                                      " } }, "StreamUrl": { "target": "com.amazonaws.ssm#StreamUrl", "traits": { - "smithy.api#documentation": "

                                                                      A URL back to SSM Agent on the instance that the Session Manager client uses to send commands and\n receive output from the instance. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)\n

                                                                      \n

                                                                      \n region represents the Region identifier for an\n\t\t\t\t\t\tAmazon Web Services Region supported by Amazon Web Services Systems Manager, such as us-east-2 for the US East (Ohio) Region.\n\t\t\t\t\t\tFor a list of supported region values, see the Region column in Systems Manager service endpoints in the\n Amazon Web Services General Reference.

                                                                      \n

                                                                      \n session-id represents the ID of a Session Manager session, such as\n 1a2b3c4dEXAMPLE.

                                                                      " + "smithy.api#documentation": "

                                                                      A URL back to SSM Agent on the managed node that the Session Manager client uses to send commands and\n receive output from the node. Format: wss://ssmmessages.region.amazonaws.com/v1/data-channel/session-id?stream=(input|output)\n

                                                                      \n

                                                                      \n region represents the Region identifier for an\n\t\t\t\t\t\tAmazon Web Services Region supported by Amazon Web Services Systems Manager, such as us-east-2 for the US East (Ohio) Region.\n\t\t\t\t\t\tFor a list of supported region values, see the Region column in Systems Manager service endpoints in the\n Amazon Web Services General Reference.

                                                                      \n

                                                                      \n session-id represents the ID of a Session Manager session, such as\n 1a2b3c4dEXAMPLE.
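To make the StartSession request/response flow described above concrete, here is a minimal sketch using the generated @aws-sdk/client-ssm client; the target ID, document name, reason, and region are placeholder assumptions:

import { SSMClient, StartSessionCommand } from "@aws-sdk/client-ssm";

// Minimal sketch: start a Session Manager session against a managed node.
// The target ID, document name, reason, and region are placeholder assumptions.
async function startSession(): Promise<void> {
  const ssm = new SSMClient({ region: "us-east-2" });
  const { SessionId, TokenValue, StreamUrl } = await ssm.send(
    new StartSessionCommand({
      Target: "i-02573cafcfEXAMPLE",
      DocumentName: "SSM-SessionManagerRunShell",
      Reason: "Routine maintenance",
    })
  );
  // A WebSocket client (for example, the Session Manager plugin) would connect to
  // StreamUrl and authenticate with TokenValue; this sketch only logs them.
  console.log(SessionId, TokenValue, StreamUrl);
}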

                                                                      " } } } @@ -24007,7 +24052,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      Metadata that you assign to your Amazon Web Services resources. Tags enable you to categorize your\n resources in different ways, for example, by purpose, owner, or environment. In Amazon Web Services Systems Manager, you\n can apply tags to Systems Manager documents (SSM documents), managed instances, maintenance windows,\n parameters, patch baselines, OpsItems, and OpsMetadata.

                                                                      " + "smithy.api#documentation": "

                                                                      Metadata that you assign to your Amazon Web Services resources. Tags enable you to categorize your\n resources in different ways, for example, by purpose, owner, or environment. In Amazon Web Services Systems Manager, you\n can apply tags to Systems Manager documents (SSM documents), managed nodes, maintenance windows,\n parameters, patch baselines, OpsItems, and OpsMetadata.

                                                                      " } }, "com.amazonaws.ssm#TagKey": { @@ -24048,7 +24093,7 @@ "Key": { "target": "com.amazonaws.ssm#TargetKey", "traits": { - "smithy.api#documentation": "

                                                                      User-defined criteria for sending commands that target instances that meet the\n criteria.

                                                                      " + "smithy.api#documentation": "

                                                                      User-defined criteria for sending commands that target managed nodes that meet the\n criteria.

                                                                      " } }, "Values": { @@ -24059,7 +24104,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      An array of search criteria that targets instances using a key-value pair that you\n specify.

                                                                      \n \n

                                                                      One or more targets must be specified for maintenance window Run Command-type tasks.\n Depending on the task, targets are optional for other maintenance window task types (Automation,\n Lambda, and Step Functions). For more information about running tasks\n that don't specify targets, see Registering\n maintenance window tasks without targets in the\n Amazon Web Services Systems Manager User Guide.

                                                                      \n
                                                                      \n

                                                                      Supported formats include the following.

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n Key=InstanceIds,Values=instance-id-1,instance-id-2,instance-id-3\n \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Key=tag:my-tag-key,Values=my-tag-value-1,my-tag-value-2\n \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Key=tag-key,Values=my-tag-key-1,my-tag-key-2\n \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Run Command and Maintenance window targets only:\n Key=resource-groups:Name,Values=resource-group-name\n \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Maintenance window targets only:\n Key=resource-groups:ResourceTypeFilters,Values=resource-type-1,resource-type-2\n \n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Automation targets only:\n Key=ResourceGroup;Values=resource-group-name\n \n

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      For example:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Key=tag-key,Values=Name,Instance-Type,CostCenter\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Run Command and Maintenance window targets only:\n Key=resource-groups:Name,Values=ProductionResourceGroup\n

                                                                        \n

                                                                        This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Maintenance window targets only:\n Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC\n \n

                                                                        \n

                                                                        This example demonstrates how to target only Amazon Elastic Compute Cloud (Amazon EC2)\n instances and VPCs in your maintenance window.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Automation targets only:\n Key=ResourceGroup,Values=MyResourceGroup\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n State Manager association targets only:\n Key=InstanceIds,Values=*\n \n

                                                                        \n

                                                                        This example demonstrates how to target all managed instances in the Amazon Web Services Region where\n the association was created.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      For more information about how to send commands that target instances using\n Key,Value parameters, see Targeting multiple instances in the Amazon Web Services Systems Manager User Guide.

                                                                      " + "smithy.api#documentation": "

                                                                      An array of search criteria that targets managed nodes using a key-value pair that you\n specify.

                                                                      \n \n

                                                                      One or more targets must be specified for maintenance window Run Command-type tasks.\n Depending on the task, targets are optional for other maintenance window task types (Automation,\n Lambda, and Step Functions). For more information about running tasks\n that don't specify targets, see Registering\n maintenance window tasks without targets in the\n Amazon Web Services Systems Manager User Guide.

                                                                      \n
                                                                      \n

                                                                      Supported formats include the following.

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n Key=InstanceIds,Values=<instance-id-1>,<instance-id-2>,<instance-id-3>\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Key=tag:<my-tag-key>,Values=<my-tag-value-1>,<my-tag-value-2>\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Key=tag-key,Values=<my-tag-key-1>,<my-tag-key-2>\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Run Command and Maintenance window targets only:\n Key=resource-groups:Name,Values=<resource-group-name>\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Maintenance window targets only:\n Key=resource-groups:ResourceTypeFilters,Values=<resource-type-1>,<resource-type-2>\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Automation targets only:\n Key=ResourceGroup;Values=<resource-group-name>\n

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      For example:

                                                                      \n
                                                                        \n
                                                                      • \n

                                                                        \n Key=InstanceIds,Values=i-02573cafcfEXAMPLE,i-0471e04240EXAMPLE,i-07782c72faEXAMPLE\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Key=tag:CostCenter,Values=CostCenter1,CostCenter2,CostCenter3\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Key=tag-key,Values=Name,Instance-Type,CostCenter\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Run Command and Maintenance window targets only:\n Key=resource-groups:Name,Values=ProductionResourceGroup\n

                                                                        \n

                                                                        This example demonstrates how to target all resources in the resource group ProductionResourceGroup in your maintenance window.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Maintenance window targets only:\n Key=resource-groups:ResourceTypeFilters,Values=AWS::EC2::INSTANCE,AWS::EC2::VPC\n

                                                                        \n

                                                                        This example demonstrates how to target only Amazon Elastic Compute Cloud (Amazon EC2)\n instances and VPCs in your maintenance window.

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n Automation targets only:\n Key=ResourceGroup,Values=MyResourceGroup\n

                                                                        \n
                                                                      • \n
                                                                      • \n

                                                                        \n State Manager association targets only:\n Key=InstanceIds,Values=*\n

                                                                        \n

                                                                        This example demonstrates how to target all managed instances in the Amazon Web Services Region where\n the association was created.

                                                                        \n
                                                                      • \n
                                                                      \n

                                                                      For more information about how to send commands that target managed nodes using\n Key,Value parameters, see Targeting multiple instances in the Amazon Web Services Systems Manager User Guide.
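As an illustration of these Key,Value formats in practice, a Run Command invocation through the generated @aws-sdk/client-ssm client might target nodes by tag as in the following minimal sketch (the region, document name, tag values, and command string are assumptions, not part of the model):

import { SSMClient, SendCommandCommand } from "@aws-sdk/client-ssm";

// Minimal sketch: run a shell command on all managed nodes tagged CostCenter=CostCenter1.
// The region, tag values, and command string are illustrative assumptions.
async function runOnTaggedNodes(): Promise<void> {
  const ssm = new SSMClient({ region: "us-east-2" });
  const response = await ssm.send(
    new SendCommandCommand({
      DocumentName: "AWS-RunShellScript",
      Targets: [{ Key: "tag:CostCenter", Values: ["CostCenter1"] }],
      Parameters: { commands: ["uptime"] },
    })
  );
  console.log(response.Command?.CommandId);
}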

                                                                      " } }, "com.amazonaws.ssm#TargetCount": { @@ -24210,7 +24255,7 @@ "code": "TargetNotConnected", "httpResponseCode": 430 }, - "smithy.api#documentation": "

                                                                      The specified target instance for the session isn't fully configured for use with Session Manager. For\n more information, see Getting started with\n Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you\n attempt to start a session on an instance that is located in a different account or Region

                                                                      ", + "smithy.api#documentation": "

                                                                      The specified target managed node for the session isn't fully configured for use with Session Manager. For\n more information, see Getting started with\n Session Manager in the Amazon Web Services Systems Manager User Guide. This error is also returned if you\n attempt to start a session on a managed node that is located in a different account or Region.

                                                                      ", "smithy.api#error": "client" } }, @@ -24274,7 +24319,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Permanently ends a session and closes the data connection between the Session Manager client and\n SSM Agent on the instance. A terminated session isn't be resumed.

                                                                      " + "smithy.api#documentation": "

                                                                      Permanently ends a session and closes the data connection between the Session Manager client and\n SSM Agent on the managed node. A terminated session can't be resumed.
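A minimal sketch of ending a session with the generated @aws-sdk/client-ssm client (the session ID and region are placeholder assumptions):

import { SSMClient, TerminateSessionCommand } from "@aws-sdk/client-ssm";

// Minimal sketch: permanently end a Session Manager session.
// The region is a placeholder assumption; sessionId comes from StartSession.
async function endSession(sessionId: string): Promise<void> {
  const ssm = new SSMClient({ region: "us-east-2" });
  await ssm.send(new TerminateSessionCommand({ SessionId: sessionId }));
}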

                                                                      " } }, "com.amazonaws.ssm#TerminateSessionRequest": { @@ -24552,7 +24597,7 @@ "code": "UnsupportedPlatformType", "httpResponseCode": 400 }, - "smithy.api#documentation": "

                                                                      The document doesn't support the platform type of the given instance ID(s). For example, you\n sent an document for a Windows instance to a Linux instance.

                                                                      ", + "smithy.api#documentation": "

                                                                      The document doesn't support the platform type of the given managed node ID(s). For example, you\n sent a document for a Windows managed node to a Linux node.

                                                                      ", "smithy.api#error": "client" } }, @@ -24643,7 +24688,7 @@ "Name": { "target": "com.amazonaws.ssm#DocumentARN", "traits": { - "smithy.api#documentation": "

                                                                      The name of the SSM Command document or Automation runbook that contains the configuration\n information for the instance.

                                                                      \n

                                                                      You can specify Amazon Web Services-predefined documents, documents you created, or a document that is\n shared with you from another account.

                                                                      \n

                                                                      For Systems Manager document (SSM document) that are shared with you from other Amazon Web Services accounts, you\n must specify the complete SSM document ARN, in the following format:

                                                                      \n

                                                                      \n arn:aws:ssm:region:account-id:document/document-name\n \n

                                                                      \n

                                                                      For example:

                                                                      \n

                                                                      \n arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document\n

                                                                      \n

                                                                      For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need\n to specify the document name. For example, AWS-ApplyPatchBaseline or\n My-Document.

                                                                      " + "smithy.api#documentation": "

                                                                      The name of the SSM Command document or Automation runbook that contains the configuration\n information for the managed node.

                                                                      \n

                                                                      You can specify Amazon Web Services-predefined documents, documents you created, or a document that is\n shared with you from another account.

                                                                      \n

                                                                      For Systems Manager documents (SSM documents) that are shared with you from other Amazon Web Services accounts, you\n must specify the complete SSM document ARN, in the following format:

                                                                      \n

                                                                      \n arn:aws:ssm:region:account-id:document/document-name\n \n

                                                                      \n

                                                                      For example:

                                                                      \n

                                                                      \n arn:aws:ssm:us-east-2:12345678912:document/My-Shared-Document\n

                                                                      \n

                                                                      For Amazon Web Services-predefined documents and SSM documents you created in your account, you only need\n to specify the document name. For example, AWS-ApplyPatchBaseline or\n My-Document.

                                                                      " } }, "Targets": { @@ -24673,13 +24718,13 @@ "MaxErrors": { "target": "com.amazonaws.ssm#MaxErrors", "traits": { - "smithy.api#documentation": "

                                                                      The number of errors that are allowed before the system stops sending requests to run the\n association on additional targets. You can specify either an absolute number of errors, for\n example 10, or a percentage of the target set, for example 10%. If you specify 3, for example,\n the system stops sending requests when the fourth error is received. If you specify 0, then the\n system stops sending requests after the first error is returned. If you run an association on 50\n instances and set MaxError to 10%, then the system stops sending the request when\n the sixth error is received.

                                                                      \n

                                                                      Executions that are already running an association when MaxErrors is reached\n are allowed to complete, but some of these executions may fail as well. If you need to ensure\n that there won't be more than max-errors failed executions, set MaxConcurrency to 1\n so that executions proceed one at a time.

                                                                      " + "smithy.api#documentation": "

                                                                      The number of errors that are allowed before the system stops sending requests to run the\n association on additional targets. You can specify either an absolute number of errors, for\n example 10, or a percentage of the target set, for example 10%. If you specify 3, for example,\n the system stops sending requests when the fourth error is received. If you specify 0, then the\n system stops sending requests after the first error is returned. If you run an association on 50\n managed nodes and set MaxError to 10%, then the system stops sending the request\n when the sixth error is received.

                                                                      \n

                                                                      Executions that are already running an association when MaxErrors is reached\n are allowed to complete, but some of these executions may fail as well. If you need to ensure\n that there won't be more than max-errors failed executions, set MaxConcurrency to 1\n so that executions proceed one at a time.

                                                                      " } }, "MaxConcurrency": { "target": "com.amazonaws.ssm#MaxConcurrency", "traits": { - "smithy.api#documentation": "

                                                                      The maximum number of targets allowed to run the association at the same time. You can\n specify a number, for example 10, or a percentage of the target set, for example 10%. The default\n value is 100%, which means all targets run the association at the same time.

                                                                      \n

                                                                      If a new instance starts and attempts to run an association while Systems Manager is running\n MaxConcurrency associations, the association is allowed to run. During the next\n association interval, the new instance will process its association within the limit specified\n for MaxConcurrency.

                                                                      " + "smithy.api#documentation": "

                                                                      The maximum number of targets allowed to run the association at the same time. You can\n specify a number, for example 10, or a percentage of the target set, for example 10%. The default\n value is 100%, which means all targets run the association at the same time.

                                                                      \n

                                                                      If a new managed node starts and attempts to run an association while Systems Manager is running\n MaxConcurrency associations, the association is allowed to run. During the next\n association interval, the new managed node will process its association within the limit specified\n for MaxConcurrency.
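To show how MaxErrors and MaxConcurrency are passed in practice, here is a minimal sketch using the generated @aws-sdk/client-ssm client (the association ID, region, and the specific throttle values are assumptions):

import { SSMClient, UpdateAssociationCommand } from "@aws-sdk/client-ssm";

// Minimal sketch: throttle an association so at most 5 targets run it at once and
// the run stops after 10% of targets fail. The ID, region, and values are assumptions.
// Note: per the UpdateAssociation documentation, optional parameters omitted from the
// call may be reset, so in practice you would resupply any settings you want to keep.
async function throttleAssociation(): Promise<void> {
  const ssm = new SSMClient({ region: "us-east-2" });
  await ssm.send(
    new UpdateAssociationCommand({
      AssociationId: "12345678-1234-1234-1234-123456789012",
      MaxConcurrency: "5",
      MaxErrors: "10%",
    })
  );
}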

                                                                      " } }, "ComplianceSeverity": { @@ -24754,7 +24799,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Updates the status of the Amazon Web Services Systems Manager document (SSM document) associated with the specified\n instance.

                                                                      \n

                                                                      \n UpdateAssociationStatus is primarily used by the Amazon Web Services Systems Manager Agent (SSM Agent) to\n report status updates about your associations and is only used for associations created with the\n InstanceId legacy parameter.

                                                                      " + "smithy.api#documentation": "

                                                                      Updates the status of the Amazon Web Services Systems Manager document (SSM document) associated with the specified\n managed node.

                                                                      \n

                                                                      \n UpdateAssociationStatus is primarily used by the Amazon Web Services Systems Manager Agent (SSM Agent) to\n report status updates about your associations and is only used for associations created with the\n InstanceId legacy parameter.

                                                                      " } }, "com.amazonaws.ssm#UpdateAssociationStatusRequest": { @@ -24770,7 +24815,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#InstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The instance ID.

                                                                      ", + "smithy.api#documentation": "

                                                                      The managed node ID.

                                                                      ", "smithy.api#required": {} } }, @@ -25354,7 +25399,7 @@ "Targets": { "target": "com.amazonaws.ssm#Targets", "traits": { - "smithy.api#documentation": "

                                                                      The targets (either instances or tags) to modify. Instances are specified using the format\n Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using the\n format Key=tag_name,Values=tag_value.

                                                                      \n \n

                                                                      One or more targets must be specified for maintenance window Run Command-type tasks.\n Depending on the task, targets are optional for other maintenance window task types (Automation,\n Lambda, and Step Functions). For more information about running tasks\n that don't specify targets, see Registering\n maintenance window tasks without targets in the\n Amazon Web Services Systems Manager User Guide.

                                                                      \n
                                                                      " + "smithy.api#documentation": "

                                                                      The targets (either managed nodes or tags) to modify. Managed nodes are specified using the\n format Key=instanceids,Values=instanceID_1,instanceID_2. Tags are specified using\n the format Key=tag_name,Values=tag_value.

                                                                      \n \n

                                                                      One or more targets must be specified for maintenance window Run Command-type tasks.\n Depending on the task, targets are optional for other maintenance window task types (Automation,\n Lambda, and Step Functions). For more information about running tasks\n that don't specify targets, see Registering\n maintenance window tasks without targets in the\n Amazon Web Services Systems Manager User Guide.

                                                                      \n
                                                                      " } }, "TaskArn": { @@ -25539,7 +25584,7 @@ } ], "traits": { - "smithy.api#documentation": "

                                                                      Changes the Identity and Access Management (IAM) role that is assigned to the\n on-premises instance or virtual machines (VM). IAM roles are first assigned to\n these hybrid instances during the activation process. For more information, see CreateActivation.

                                                                      " + "smithy.api#documentation": "

                                                                      Changes the Identity and Access Management (IAM) role that is assigned to the\n on-premises server, edge device, or virtual machine (VM). IAM roles are first\n assigned to these hybrid nodes during the activation process. For more information, see CreateActivation.

                                                                      " } }, "com.amazonaws.ssm#UpdateManagedInstanceRoleRequest": { @@ -25548,7 +25593,7 @@ "InstanceId": { "target": "com.amazonaws.ssm#ManagedInstanceId", "traits": { - "smithy.api#documentation": "

                                                                      The ID of the managed instance where you want to update the role.

                                                                      ", + "smithy.api#documentation": "

                                                                      The ID of the managed node where you want to update the role.
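A minimal sketch of this operation with the generated @aws-sdk/client-ssm client (the hybrid node ID, role name, and region are placeholder assumptions):

import { SSMClient, UpdateManagedInstanceRoleCommand } from "@aws-sdk/client-ssm";

// Minimal sketch: swap the IAM role assigned to a hybrid-activated managed node.
// The node ID (mi-...), role name, and region are placeholder assumptions.
async function updateNodeRole(): Promise<void> {
  const ssm = new SSMClient({ region: "us-east-2" });
  await ssm.send(
    new UpdateManagedInstanceRoleCommand({
      InstanceId: "mi-0123456789abcdef0",
      IamRole: "MyHybridNodeRole",
    })
  );
}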

                                                                      ", "smithy.api#required": {} } }, @@ -25822,7 +25867,7 @@ "target": "com.amazonaws.ssm#Boolean", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      Indicates whether the list of approved patches includes non-security updates that should be\n applied to the instances. The default value is false. Applies to Linux instances\n only.

                                                                      " + "smithy.api#documentation": "

                                                                      Indicates whether the list of approved patches includes non-security updates that should be\n applied to the managed nodes. The default value is false. Applies to Linux managed\n nodes only.

                                                                      " } }, "RejectedPatches": { @@ -25846,7 +25891,7 @@ "Sources": { "target": "com.amazonaws.ssm#PatchSourceList", "traits": { - "smithy.api#documentation": "

                                                                      Information about the patches to use to update the instances, including target operating\n systems and source repositories. Applies to Linux instances only.

                                                                      " + "smithy.api#documentation": "

                                                                      Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only.
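A minimal sketch showing how ApprovedPatchesEnableNonSecurity and Sources are supplied when updating a baseline through @aws-sdk/client-ssm (the baseline ID, region, repository details, and product list are assumptions):

import { SSMClient, UpdatePatchBaselineCommand } from "@aws-sdk/client-ssm";

// Minimal sketch: allow non-security updates and point a Linux baseline at a
// custom yum repository. The baseline ID, region, and source details are assumptions.
async function updateLinuxBaseline(): Promise<void> {
  const ssm = new SSMClient({ region: "us-east-2" });
  await ssm.send(
    new UpdatePatchBaselineCommand({
      BaselineId: "pb-0123456789abcdef0",
      ApprovedPatchesEnableNonSecurity: true,
      Sources: [
        {
          Name: "My-AL2-Extras",
          Products: ["AmazonLinux2"],
          Configuration: "[amzn2extra-example]\nname=Example\nbaseurl=https://example.com/repo\nenabled=1",
        },
      ],
    })
  );
}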

                                                                      " } }, "Replace": { @@ -25907,7 +25952,7 @@ "target": "com.amazonaws.ssm#Boolean", "traits": { "smithy.api#box": {}, - "smithy.api#documentation": "

                                                                      Indicates whether the list of approved patches includes non-security updates that should be\n applied to the instances. The default value is false. Applies to Linux instances\n only.

                                                                      " + "smithy.api#documentation": "

                                                                      Indicates whether the list of approved patches includes non-security updates that should be\n applied to the managed nodes. The default value is false. Applies to Linux managed\n nodes only.

                                                                      " } }, "RejectedPatches": { @@ -25943,7 +25988,7 @@ "Sources": { "target": "com.amazonaws.ssm#PatchSourceList", "traits": { - "smithy.api#documentation": "

                                                                      Information about the patches to use to update the instances, including target operating\n systems and source repositories. Applies to Linux instances only.

                                                                      " + "smithy.api#documentation": "

                                                                      Information about the patches to use to update the managed nodes, including target operating\n systems and source repositories. Applies to Linux managed nodes only.

                                                                      " } } } diff --git a/codegen/sdk-codegen/aws-models/storage-gateway.json b/codegen/sdk-codegen/aws-models/storage-gateway.json index 0d97cd040aa4..84b9a4d859f7 100644 --- a/codegen/sdk-codegen/aws-models/storage-gateway.json +++ b/codegen/sdk-codegen/aws-models/storage-gateway.json @@ -83,7 +83,7 @@ "GatewayType": { "target": "com.amazonaws.storagegateway#GatewayType", "traits": { - "smithy.api#documentation": "

                                                                      A value that defines the type of gateway to activate. The type specified is critical to\n all later functions of the gateway and cannot be changed after activation. The default\n value is CACHED.

                                                                      \n\n

                                                                      Valid Values: STORED | CACHED | VTL |\n FILE_S3 | FILE_FSX_SMB|\n

                                                                      " + "smithy.api#documentation": "

                                                                      A value that defines the type of gateway to activate. The type specified is critical to\n all later functions of the gateway and cannot be changed after activation. The default\n value is CACHED.

                                                                      \n\n

                                                                      Valid Values: STORED | CACHED | VTL |\n VTL_SNOW | FILE_S3 | FILE_FSX_SMB\n
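As a minimal sketch of passing GatewayType during activation through the generated @aws-sdk/client-storage-gateway client (the activation key, gateway name, timezone, and region values are placeholder assumptions):

import { StorageGatewayClient, ActivateGatewayCommand } from "@aws-sdk/client-storage-gateway";

// Minimal sketch: activate an Amazon S3 File Gateway. The activation key,
// gateway name, timezone, and region values are placeholder assumptions.
async function activateFileGateway(): Promise<void> {
  const client = new StorageGatewayClient({ region: "us-east-2" });
  const { GatewayARN } = await client.send(
    new ActivateGatewayCommand({
      ActivationKey: "29AV1-3OFV9-VVIUB-NKT0I-LRO6V",
      GatewayName: "my-file-gateway",
      GatewayTimezone: "GMT-8:00",
      GatewayRegion: "us-east-2",
      GatewayType: "FILE_S3",
    })
  );
  console.log(GatewayARN);
}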

                                                                      " } }, "TapeDriveType": { @@ -3158,7 +3158,7 @@ "HostEnvironment": { "target": "com.amazonaws.storagegateway#HostEnvironment", "traits": { - "smithy.api#documentation": "

                                                                      The type of hypervisor environment used by the host.

                                                                      " + "smithy.api#documentation": "

                                                                      The type of hardware or software platform on which the gateway is running.

                                                                      " } }, "EndpointType": { @@ -3190,6 +3190,12 @@ "traits": { "smithy.api#documentation": "

                                                                      A list of the metadata cache sizes that the gateway can support based on its current\n hardware specifications.

                                                                      " } + }, + "HostEnvironmentId": { + "target": "com.amazonaws.storagegateway#HostEnvironmentId", + "traits": { + "smithy.api#documentation": "

                                                                      A unique identifier for the specific instance of the host platform running the gateway.\n This value is only available for certain host environments, and its format depends on the\n host environment type.
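A minimal sketch of reading the HostEnvironment and new HostEnvironmentId fields from DescribeGatewayInformation (the gateway ARN and region are placeholder assumptions):

import { StorageGatewayClient, DescribeGatewayInformationCommand } from "@aws-sdk/client-storage-gateway";

// Minimal sketch: read the host platform details for a gateway.
// The gateway ARN and region are placeholder assumptions.
async function describeHostPlatform(): Promise<void> {
  const client = new StorageGatewayClient({ region: "us-east-2" });
  const info = await client.send(
    new DescribeGatewayInformationCommand({
      GatewayARN: "arn:aws:storagegateway:us-east-2:123456789012:gateway/sgw-12A3456B",
    })
  );
  console.log(info.HostEnvironment, info.HostEnvironmentId);
}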

                                                                      " + } } }, "traits": { @@ -4998,6 +5004,18 @@ "traits": { "smithy.api#documentation": "

                                                                      The Amazon Web Services Region where the Amazon EC2 instance is located.

                                                                      " } + }, + "HostEnvironment": { + "target": "com.amazonaws.storagegateway#HostEnvironment", + "traits": { + "smithy.api#documentation": "

                                                                      The type of hardware or software platform on which the gateway is running.

                                                                      " + } + }, + "HostEnvironmentId": { + "target": "com.amazonaws.storagegateway#HostEnvironmentId", + "traits": { + "smithy.api#documentation": "

                                                                      A unique identifier for the specific instance of the host platform running the gateway.\n This value is only available for certain host environments, and its format depends on the\n host environment type.

                                                                      " + } } }, "traits": { @@ -5096,10 +5114,23 @@ { "value": "OTHER", "name": "OTHER" + }, + { + "value": "SNOWBALL", + "name": "SNOWBALL" } ] } }, + "com.amazonaws.storagegateway#HostEnvironmentId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + } + } + }, "com.amazonaws.storagegateway#Hosts": { "type": "list", "member": { @@ -7362,6 +7393,24 @@ }, "com.amazonaws.storagegateway#StorageGateway_20130630": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "Storage Gateway", + "arnNamespace": "storagegateway", + "cloudFormationName": "StorageGateway", + "cloudTrailEventSource": "storagegateway.amazonaws.com", + "endpointPrefix": "storagegateway" + }, + "aws.auth#sigv4": { + "name": "storagegateway" + }, + "aws.protocols#awsJson1_1": {}, + "smithy.api#documentation": "Storage Gateway Service\n\n

@@ -7362,6 +7393,24 @@
   },
   "com.amazonaws.storagegateway#StorageGateway_20130630": {
     "type": "service",
+    "traits": {
+      "aws.api#service": {
+        "sdkId": "Storage Gateway",
+        "arnNamespace": "storagegateway",
+        "cloudFormationName": "StorageGateway",
+        "cloudTrailEventSource": "storagegateway.amazonaws.com",
+        "endpointPrefix": "storagegateway"
+      },
+      "aws.auth#sigv4": {
+        "name": "storagegateway"
+      },
+      "aws.protocols#awsJson1_1": {},
+      "smithy.api#documentation": "Storage Gateway Service\n\n Storage Gateway is the service that connects an on-premises software appliance\n with cloud-based storage to provide seamless and secure integration between an\n organization's on-premises IT environment and the Amazon Web Services storage\n infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery.\n\n Use the following links to get started using the Storage Gateway\n Service API Reference:\n\n • Storage Gateway required request headers: Describes the required\n headers that you must send with every POST request to Storage Gateway.\n • Signing requests: Storage Gateway requires that you authenticate\n every request you send; this topic describes how sign such a request.\n • Error responses: Provides reference information about Storage Gateway errors.\n • Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and\n examples of requests and responses.\n • Storage Gateway\n endpoints and quotas: Provides a list of each Amazon Web Services Region\n and the endpoints available for use with Storage Gateway.\n\n Storage Gateway resource IDs are in uppercase. When you use these resource IDs\n with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change\n your resource ID to lowercase to use it with the EC2 API. For example, in Storage\n Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use\n this ID with the EC2 API, you must change it to vol-aa22bb012345daf670.\n Otherwise, the EC2 API might not behave as expected.\n\n IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway\n volumes are changing to a longer format. Starting in December 2016, all new volumes and\n snapshots will be created with a 17-character string. Starting in April 2016, you will\n be able to use these longer IDs so you can test your systems with the new format. For\n more information, see Longer EC2 and\n EBS resource IDs.\n\n For example, a volume Amazon Resource Name (ARN) with the longer volume ID format\n looks like the following:\n\n arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.\n\n A snapshot ID with the longer ID format looks like the following:\n snap-78e226633445566ee.\n\n For more information, see Announcement:\n Heads-up – Longer Storage Gateway volume and snapshot IDs coming in\n 2016.\n",
+      "smithy.api#title": "AWS Storage Gateway",
+      "smithy.api#xmlNamespace": {
+        "uri": "http://storagegateway.amazonaws.com/doc/2013-06-30"
+      }
+    },
     "version": "2013-06-30",
     "operations": [
       {
@@ -7634,25 +7683,7 @@
       {
         "target": "com.amazonaws.storagegateway#UpdateVTLDeviceType"
       }
-    ],
-    "traits": {
-      "aws.api#service": {
-        "sdkId": "Storage Gateway",
-        "arnNamespace": "storagegateway",
-        "cloudFormationName": "StorageGateway",
-        "cloudTrailEventSource": "storagegateway.amazonaws.com",
-        "endpointPrefix": "storagegateway"
-      },
-      "aws.auth#sigv4": {
-        "name": "storagegateway"
-      },
-      "aws.protocols#awsJson1_1": {},
-      "smithy.api#documentation": "Storage Gateway Service\n\n Storage Gateway is the service that connects an on-premises software appliance\n with cloud-based storage to provide seamless and secure integration between an\n organization's on-premises IT environment and the Amazon Web Services storage\n infrastructure. The service enables you to securely upload data to the Amazon Web Services Cloud for cost effective backup and rapid disaster recovery.\n\n Use the following links to get started using the Storage Gateway\n Service API Reference:\n\n • Storage Gateway required request headers: Describes the required\n headers that you must send with every POST request to Storage Gateway.\n • Signing requests: Storage Gateway requires that you authenticate\n every request you send; this topic describes how sign such a request.\n • Error responses: Provides reference information about Storage Gateway errors.\n • Operations in Storage Gateway: Contains detailed descriptions of all Storage Gateway operations, their request parameters, response elements, possible errors, and\n examples of requests and responses.\n • Storage Gateway\n endpoints and quotas: Provides a list of each Amazon Web Services Region\n and the endpoints available for use with Storage Gateway.\n\n Storage Gateway resource IDs are in uppercase. When you use these resource IDs\n with the Amazon EC2 API, EC2 expects resource IDs in lowercase. You must change\n your resource ID to lowercase to use it with the EC2 API. For example, in Storage\n Gateway the ID for a volume might be vol-AA22BB012345DAF670. When you use\n this ID with the EC2 API, you must change it to vol-aa22bb012345daf670.\n Otherwise, the EC2 API might not behave as expected.\n\n IDs for Storage Gateway volumes and Amazon EBS snapshots created from gateway\n volumes are changing to a longer format. Starting in December 2016, all new volumes and\n snapshots will be created with a 17-character string. Starting in April 2016, you will\n be able to use these longer IDs so you can test your systems with the new format. For\n more information, see Longer EC2 and\n EBS resource IDs.\n\n For example, a volume Amazon Resource Name (ARN) with the longer volume ID format\n looks like the following:\n\n arn:aws:storagegateway:us-west-2:111122223333:gateway/sgw-12A3456B/volume/vol-1122AABBCCDDEEFFG.\n\n A snapshot ID with the longer ID format looks like the following:\n snap-78e226633445566ee.\n\n For more information, see Announcement:\n Heads-up – Longer Storage Gateway volume and snapshot IDs coming in\n 2016.\n",
-      "smithy.api#title": "AWS Storage Gateway",
-      "smithy.api#xmlNamespace": {
-        "uri": "http://storagegateway.amazonaws.com/doc/2013-06-30"
-      }
-    }
+    ]
   },
   "com.amazonaws.storagegateway#StorediSCSIVolume": {
     "type": "structure",
diff --git a/codegen/sdk-codegen/aws-models/wellarchitected.json b/codegen/sdk-codegen/aws-models/wellarchitected.json
index 6ccb2c9f097e..b92fdaf1ff51 100644
--- a/codegen/sdk-codegen/aws-models/wellarchitected.json
+++ b/codegen/sdk-codegen/aws-models/wellarchitected.json
@@ -66,6 +66,12 @@
       "HelpfulResourceUrl": {
         "target": "com.amazonaws.wellarchitected#HelpfulResourceUrl"
       },
+      "HelpfulResourceDisplayText": {
+        "target": "com.amazonaws.wellarchitected#DisplayText",
+        "traits": {
+          "smithy.api#documentation": "The helpful resource text to be displayed."
+        }
+      },
       "Choices": {
         "target": "com.amazonaws.wellarchitected#Choices"
       },
@@ -201,7 +207,7 @@
       }
     ],
     "traits": {
-      "smithy.api#documentation": "Associate a lens to a workload.",
+      "smithy.api#documentation": "Associate a lens to a workload.\n Up to 10 lenses can be associated with a workload in a single API operation. A \n maximum of 20 lenses can be associated with a workload.\n \n Disclaimer\n By accessing and/or applying custom lenses created by another Amazon Web Services user or account, \n you acknowledge that custom lenses created by other users and shared with you are \n Third Party Content as defined in the Amazon Web Services Customer Agreement.\n",
       "smithy.api#http": {
         "method": "PATCH",
         "uri": "/workloads/{WorkloadId}/associateLenses",

@@ -233,14 +239,14 @@
   "com.amazonaws.wellarchitected#AwsAccountId": {
     "type": "string",
     "traits": {
-      "smithy.api#documentation": "An AWS account ID.",
+      "smithy.api#documentation": "An Amazon Web Services account ID.",
       "smithy.api#pattern": "^[0-9]{12}$"
     }
   },
   "com.amazonaws.wellarchitected#AwsRegion": {
     "type": "string",
     "traits": {
-      "smithy.api#documentation": "An AWS Region, for example, us-west-2 or\n ap-northeast-1.",
+      "smithy.api#documentation": "An Amazon Web Services Region, for example, us-west-2 or\n ap-northeast-1.",
       "smithy.api#length": {
         "min": 0,
         "max": 100
@@ -264,6 +270,18 @@
     },
     "Description": {
       "target": "com.amazonaws.wellarchitected#ChoiceDescription"
+    },
+    "HelpfulResource": {
+      "target": "com.amazonaws.wellarchitected#ChoiceContent",
+      "traits": {
+        "smithy.api#documentation": "The choice level helpful resource."
+      }
+    },
+    "ImprovementPlan": {
+      "target": "com.amazonaws.wellarchitected#ChoiceContent",
+      "traits": {
+        "smithy.api#documentation": "The choice level improvement plan."
+      }
+    }
   },
   "traits": {
@@ -334,6 +352,44 @@
       "target": "com.amazonaws.wellarchitected#ChoiceAnswer"
     }
   },
+  "com.amazonaws.wellarchitected#ChoiceContent": {
+    "type": "structure",
+    "members": {
+      "DisplayText": {
+        "target": "com.amazonaws.wellarchitected#ChoiceContentDisplayText",
+        "traits": {
+          "smithy.api#documentation": "The display text for the choice content."
+        }
+      },
+      "Url": {
+        "target": "com.amazonaws.wellarchitected#ChoiceContentUrl",
+        "traits": {
+          "smithy.api#documentation": "The URL for the choice content."
+        }
+      }
+    },
+    "traits": {
+      "smithy.api#documentation": "The choice content."
+    }
+  },
+  "com.amazonaws.wellarchitected#ChoiceContentDisplayText": {
+    "type": "string",
+    "traits": {
+      "smithy.api#length": {
+        "min": 1,
+        "max": 1024
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#ChoiceContentUrl": {
+    "type": "string",
+    "traits": {
+      "smithy.api#length": {
+        "min": 1,
+        "max": 1024
+      }
+    }
+  },
   "com.amazonaws.wellarchitected#ChoiceDescription": {
     "type": "string",
     "traits": {
@@ -354,6 +410,32 @@
     }
   },
+  "com.amazonaws.wellarchitected#ChoiceImprovementPlan": {
+    "type": "structure",
+    "members": {
+      "ChoiceId": {
+        "target": "com.amazonaws.wellarchitected#ChoiceId"
+      },
+      "DisplayText": {
+        "target": "com.amazonaws.wellarchitected#DisplayText",
+        "traits": {
+          "smithy.api#documentation": "The display text for the improvement plan."
+        }
+      },
+      "ImprovementPlanUrl": {
+        "target": "com.amazonaws.wellarchitected#ImprovementPlanUrl"
+      }
+    },
+    "traits": {
+      "smithy.api#documentation": "The choice level improvement plan."
+    }
+  },
+  "com.amazonaws.wellarchitected#ChoiceImprovementPlans": {
+    "type": "list",
+    "member": {
+      "target": "com.amazonaws.wellarchitected#ChoiceImprovementPlan"
+    }
+  },
   "com.amazonaws.wellarchitected#ChoiceNotes": {
     "type": "string",
     "traits": {
@@ -467,7 +549,7 @@
   "com.amazonaws.wellarchitected#ClientRequestToken": {
     "type": "string",
     "traits": {
-      "smithy.api#documentation": "A unique case-sensitive string used to ensure that this request is idempotent\n (executes only once).\n You should not reuse the same token for other requests. If you retry a request with\n the same client request token and the same parameters after it has completed\n successfully, the result of the original request is returned.\n \n This token is listed as required, however, if you do not specify it, the AWS SDKs\n automatically generate one for you. If you are not using the AWS SDK or the AWS CLI,\n you must provide this token or the request will fail.\n"
+      "smithy.api#documentation": "A unique case-sensitive string used to ensure that this request is idempotent\n (executes only once).\n You should not reuse the same token for other requests. If you retry a request with\n the same client request token and the same parameters after it has completed\n successfully, the result of the original request is returned.\n \n This token is listed as required, however, if you do not specify it, the Amazon Web Services SDKs\n automatically generate one for you. If you are not using the Amazon Web Services SDK or the CLI,\n you must provide this token or the request will fail.\n"
     }
   },
   "com.amazonaws.wellarchitected#ConflictException": {

@@ -507,6 +589,168 @@
       }
     }
   },
+  "com.amazonaws.wellarchitected#CreateLensShare": {
+    "type": "operation",
+    "input": {
+      "target": "com.amazonaws.wellarchitected#CreateLensShareInput"
+    },
+    "output": {
+      "target": "com.amazonaws.wellarchitected#CreateLensShareOutput"
+    },
+    "errors": [
+      { "target": "com.amazonaws.wellarchitected#AccessDeniedException" },
+      { "target": "com.amazonaws.wellarchitected#ConflictException" },
+      { "target": "com.amazonaws.wellarchitected#InternalServerException" },
+      { "target": "com.amazonaws.wellarchitected#ResourceNotFoundException" },
+      { "target": "com.amazonaws.wellarchitected#ServiceQuotaExceededException" },
+      { "target": "com.amazonaws.wellarchitected#ThrottlingException" },
+      { "target": "com.amazonaws.wellarchitected#ValidationException" }
+    ],
+    "traits": {
+      "smithy.api#documentation": "Create a lens share.\n The owner of a lens can share it with other Amazon Web Services accounts and IAM users in the same Amazon Web Services Region. \n Shared access to a lens is not removed until the lens invitation is deleted.\n \n Disclaimer\n By sharing your custom lenses with other Amazon Web Services accounts, \n you acknowledge that Amazon Web Services will make your custom lenses available to those \n other accounts. Those other accounts may continue to access and use your \n shared custom lenses even if you delete the custom lenses \n from your own Amazon Web Services account or terminate \n your Amazon Web Services account.\n",
+      "smithy.api#http": {
+        "method": "POST",
+        "uri": "/lenses/{LensAlias}/shares",
+        "code": 200
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#CreateLensShareInput": {
+    "type": "structure",
+    "members": {
+      "LensAlias": {
+        "target": "com.amazonaws.wellarchitected#LensAlias",
+        "traits": { "smithy.api#httpLabel": {}, "smithy.api#required": {} }
+      },
+      "SharedWith": {
+        "target": "com.amazonaws.wellarchitected#SharedWith",
+        "traits": { "smithy.api#required": {} }
+      },
+      "ClientRequestToken": {
+        "target": "com.amazonaws.wellarchitected#ClientRequestToken",
+        "traits": { "smithy.api#idempotencyToken": {}, "smithy.api#required": {} }
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#CreateLensShareOutput": {
+    "type": "structure",
+    "members": {
+      "ShareId": {
+        "target": "com.amazonaws.wellarchitected#ShareId"
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#CreateLensVersion": {
+    "type": "operation",
+    "input": {
+      "target": "com.amazonaws.wellarchitected#CreateLensVersionInput"
+    },
+    "output": {
+      "target": "com.amazonaws.wellarchitected#CreateLensVersionOutput"
+    },
+    "errors": [
+      { "target": "com.amazonaws.wellarchitected#AccessDeniedException" },
+      { "target": "com.amazonaws.wellarchitected#ConflictException" },
+      { "target": "com.amazonaws.wellarchitected#InternalServerException" },
+      { "target": "com.amazonaws.wellarchitected#ResourceNotFoundException" },
+      { "target": "com.amazonaws.wellarchitected#ServiceQuotaExceededException" },
+      { "target": "com.amazonaws.wellarchitected#ThrottlingException" },
+      { "target": "com.amazonaws.wellarchitected#ValidationException" }
+    ],
+    "traits": {
+      "smithy.api#documentation": "Create a new lens version.\n A lens can have up to 100 versions.\n After a lens has been imported, create a new lens version to publish it. The owner of a lens can share the lens with other \n Amazon Web Services accounts and IAM users in the same Amazon Web Services Region. Only the owner of a lens can delete it.\n",
+      "smithy.api#http": {
+        "method": "POST",
+        "uri": "/lenses/{LensAlias}/versions",
+        "code": 200
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#CreateLensVersionInput": {
+    "type": "structure",
+    "members": {
+      "LensAlias": {
+        "target": "com.amazonaws.wellarchitected#LensAlias",
+        "traits": { "smithy.api#httpLabel": {}, "smithy.api#required": {} }
+      },
+      "LensVersion": {
+        "target": "com.amazonaws.wellarchitected#LensVersion",
+        "traits": {
+          "smithy.api#documentation": "The version of the lens being created.",
+          "smithy.api#required": {}
+        }
+      },
+      "IsMajorVersion": {
+        "target": "com.amazonaws.wellarchitected#IsMajorVersion",
+        "traits": {
+          "smithy.api#documentation": "Set to true if this new major lens version."
+        }
+      },
+      "ClientRequestToken": {
+        "target": "com.amazonaws.wellarchitected#ClientRequestToken",
+        "traits": { "smithy.api#idempotencyToken": {}, "smithy.api#required": {} }
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#CreateLensVersionOutput": {
+    "type": "structure",
+    "members": {
+      "LensArn": {
+        "target": "com.amazonaws.wellarchitected#LensArn",
+        "traits": {
+          "smithy.api#documentation": "The ARN for the lens."
+        }
+      },
+      "LensVersion": {
+        "target": "com.amazonaws.wellarchitected#LensVersion",
+        "traits": {
+          "smithy.api#documentation": "The version of the lens."
+        }
+      }
+    }
+  },
   "com.amazonaws.wellarchitected#CreateMilestone": {
     "type": "operation",
     "input": {

@@ -618,7 +862,7 @@
       }
     ],
     "traits": {
-      "smithy.api#documentation": "Create a new workload.\n The owner of a workload can share the workload with other AWS accounts and IAM users\n in the same AWS Region. Only the owner of a workload can delete it.\n For more information, see Defining a Workload in the\n AWS Well-Architected Tool User Guide.",
+      "smithy.api#documentation": "Create a new workload.\n The owner of a workload can share the workload with other Amazon Web Services accounts and IAM users\n in the same Amazon Web Services Region. Only the owner of a workload can delete it.\n For more information, see Defining a Workload in the\n Well-Architected Tool User Guide.",
       "smithy.api#http": {
         "method": "POST",
         "uri": "/workloads",
@@ -747,7 +991,7 @@
       }
     ],
     "traits": {
-      "smithy.api#documentation": "Create a workload share.\n The owner of a workload can share it with other AWS accounts and IAM users in the same\n AWS Region. Shared access to a workload is not removed until the workload invitation is\n deleted.\n For more information, see Sharing a Workload in the\n AWS Well-Architected Tool User Guide.",
+      "smithy.api#documentation": "Create a workload share.\n The owner of a workload can share it with other Amazon Web Services accounts and IAM users in the same\n Amazon Web Services Region. Shared access to a workload is not removed until the workload invitation is\n deleted.\n For more information, see Sharing a Workload in the\n Well-Architected Tool User Guide.",
       "smithy.api#http": {
         "method": "POST",
         "uri": "/workloads/{WorkloadId}/shares",
@@ -803,10 +1047,10 @@
       "smithy.api#documentation": "Input for Create Workload Share"
     }
   },
-  "com.amazonaws.wellarchitected#DeleteWorkload": {
+  "com.amazonaws.wellarchitected#DeleteLens": {
     "type": "operation",
     "input": {
-      "target": "com.amazonaws.wellarchitected#DeleteWorkloadInput"
+      "target": "com.amazonaws.wellarchitected#DeleteLensInput"
     },
     "errors": [
       {
@@ -829,19 +1073,19 @@
       }
     ],
     "traits": {
-      "smithy.api#documentation": "Delete an existing workload.",
+      "smithy.api#documentation": "Delete an existing lens.\n Only the owner of a lens can delete it. After the lens is deleted, Amazon Web Services accounts and IAM users \n that you shared the lens with can continue to use it, but they will no longer be able to apply it to new workloads.\n \n Disclaimer\n By sharing your custom lenses with other Amazon Web Services accounts, \n you acknowledge that Amazon Web Services will make your custom lenses available to those \n other accounts. Those other accounts may continue to access and use your \n shared custom lenses even if you delete the custom lenses \n from your own Amazon Web Services account or terminate \n your Amazon Web Services account.\n",
       "smithy.api#http": {
         "method": "DELETE",
-        "uri": "/workloads/{WorkloadId}",
+        "uri": "/lenses/{LensAlias}",
         "code": 200
       }
     }
   },
-  "com.amazonaws.wellarchitected#DeleteWorkloadInput": {
+  "com.amazonaws.wellarchitected#DeleteLensInput": {
     "type": "structure",
     "members": {
-      "WorkloadId": {
-        "target": "com.amazonaws.wellarchitected#WorkloadId",
+      "LensAlias": {
+        "target": "com.amazonaws.wellarchitected#LensAlias",
         "traits": {
           "smithy.api#httpLabel": {},
           "smithy.api#required": {}
@@ -854,16 +1098,21 @@
           "smithy.api#idempotencyToken": {},
           "smithy.api#required": {}
         }
+      },
+      "LensStatus": {
+        "target": "com.amazonaws.wellarchitected#LensStatusType",
+        "traits": {
+          "smithy.api#documentation": "The status of the lens to be deleted.",
+          "smithy.api#httpQuery": "LensStatus",
+          "smithy.api#required": {}
+        }
       }
-    },
-    "traits": {
-      "smithy.api#documentation": "Input for workload deletion."
     }
   },
-  "com.amazonaws.wellarchitected#DeleteWorkloadShare": {
+  "com.amazonaws.wellarchitected#DeleteLensShare": {
     "type": "operation",
     "input": {
-      "target": "com.amazonaws.wellarchitected#DeleteWorkloadShareInput"
+      "target": "com.amazonaws.wellarchitected#DeleteLensShareInput"
     },
     "errors": [
       {
@@ -886,15 +1135,15 @@
       }
     ],
     "traits": {
-      "smithy.api#documentation": "Delete a workload share.",
+      "smithy.api#documentation": "Delete a lens share.\n After the lens share is deleted, Amazon Web Services accounts and IAM users \n that you shared the lens with can continue to use it, but they will no longer be able to apply it to new workloads.\n \n Disclaimer\n By sharing your custom lenses with other Amazon Web Services accounts, \n you acknowledge that Amazon Web Services will make your custom lenses available to those \n other accounts. Those other accounts may continue to access and use your \n shared custom lenses even if you delete the custom lenses \n from your own Amazon Web Services account or terminate \n your Amazon Web Services account.\n",
       "smithy.api#http": {
         "method": "DELETE",
-        "uri": "/workloads/{WorkloadId}/shares/{ShareId}",
+        "uri": "/lenses/{LensAlias}/shares/{ShareId}",
         "code": 200
       }
     }
   },
-  "com.amazonaws.wellarchitected#DeleteWorkloadShareInput": {
+  "com.amazonaws.wellarchitected#DeleteLensShareInput": {
     "type": "structure",
     "members": {
       "ShareId": {
         "target": "com.amazonaws.wellarchitected#ShareId",
         "traits": {
           "smithy.api#httpLabel": {},
           "smithy.api#required": {}
         }
       },
-      "WorkloadId": {
-        "target": "com.amazonaws.wellarchitected#WorkloadId",
+      "LensAlias": {
+        "target": "com.amazonaws.wellarchitected#LensAlias",
         "traits": {
           "smithy.api#httpLabel": {},
           "smithy.api#required": {}
@@ -919,34 +1168,12 @@
           "smithy.api#required": {}
         }
       }
-    },
-    "traits": {
-      "smithy.api#documentation": "Input for Delete Workload Share"
     }
   },
-  "com.amazonaws.wellarchitected#DifferenceStatus": {
-    "type": "string",
-    "traits": {
-      "smithy.api#enum": [
-        { "value": "UPDATED", "name": "UPDATED" },
-        { "value": "NEW", "name": "NEW" },
-        { "value": "DELETED", "name": "DELETED" }
-      ]
-    }
-  },
-  "com.amazonaws.wellarchitected#DisassociateLenses": {
+  "com.amazonaws.wellarchitected#DeleteWorkload": {
     "type": "operation",
     "input": {
-      "target": "com.amazonaws.wellarchitected#DisassociateLensesInput"
+      "target": "com.amazonaws.wellarchitected#DeleteWorkloadInput"
    },
     "errors": [
       {
@@ -969,15 +1196,155 @@
       }
     ],
     "traits": {
-      "smithy.api#documentation": "Disassociate a lens from a workload.\n \n The AWS Well-Architected Framework lens (wellarchitected) cannot be\n removed from a workload.\n",
+      "smithy.api#documentation": "Delete an existing workload.",
       "smithy.api#http": {
-        "method": "PATCH",
-        "uri": "/workloads/{WorkloadId}/disassociateLenses",
+        "method": "DELETE",
+        "uri": "/workloads/{WorkloadId}",
         "code": 200
       }
     }
   },
+  "com.amazonaws.wellarchitected#DeleteWorkloadInput": {
+    "type": "structure",
+    "members": {
+      "WorkloadId": {
+        "target": "com.amazonaws.wellarchitected#WorkloadId",
+        "traits": {
+          "smithy.api#httpLabel": {},
+          "smithy.api#required": {}
+        }
+      },
+      "ClientRequestToken": {
+        "target": "com.amazonaws.wellarchitected#ClientRequestToken",
+        "traits": {
+          "smithy.api#httpQuery": "ClientRequestToken",
+          "smithy.api#idempotencyToken": {},
+          "smithy.api#required": {}
+        }
+      }
+    },
+    "traits": {
+      "smithy.api#documentation": "Input for workload deletion."
+    }
+  },
+  "com.amazonaws.wellarchitected#DeleteWorkloadShare": {
+    "type": "operation",
+    "input": {
+      "target": "com.amazonaws.wellarchitected#DeleteWorkloadShareInput"
+    },
+    "errors": [
+      { "target": "com.amazonaws.wellarchitected#AccessDeniedException" },
+      { "target": "com.amazonaws.wellarchitected#ConflictException" },
+      { "target": "com.amazonaws.wellarchitected#InternalServerException" },
+      { "target": "com.amazonaws.wellarchitected#ResourceNotFoundException" },
+      { "target": "com.amazonaws.wellarchitected#ThrottlingException" },
+      { "target": "com.amazonaws.wellarchitected#ValidationException" }
+    ],
+    "traits": {
+      "smithy.api#documentation": "Delete a workload share.",
+      "smithy.api#http": {
+        "method": "DELETE",
+        "uri": "/workloads/{WorkloadId}/shares/{ShareId}",
+        "code": 200
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#DeleteWorkloadShareInput": {
+    "type": "structure",
+    "members": {
+      "ShareId": {
+        "target": "com.amazonaws.wellarchitected#ShareId",
+        "traits": { "smithy.api#httpLabel": {}, "smithy.api#required": {} }
+      },
+      "WorkloadId": {
+        "target": "com.amazonaws.wellarchitected#WorkloadId",
+        "traits": { "smithy.api#httpLabel": {}, "smithy.api#required": {} }
+      },
+      "ClientRequestToken": {
+        "target": "com.amazonaws.wellarchitected#ClientRequestToken",
+        "traits": {
+          "smithy.api#httpQuery": "ClientRequestToken",
+          "smithy.api#idempotencyToken": {},
+          "smithy.api#required": {}
+        }
+      }
+    },
+    "traits": {
+      "smithy.api#documentation": "Input for Delete Workload Share"
+    }
+  },
+  "com.amazonaws.wellarchitected#DifferenceStatus": {
+    "type": "string",
+    "traits": {
+      "smithy.api#enum": [
+        { "value": "UPDATED", "name": "UPDATED" },
+        { "value": "NEW", "name": "NEW" },
+        { "value": "DELETED", "name": "DELETED" }
+      ]
+    }
+  },
+  "com.amazonaws.wellarchitected#DisassociateLenses": {
+    "type": "operation",
+    "input": {
+      "target": "com.amazonaws.wellarchitected#DisassociateLensesInput"
+    },
+    "errors": [
+      { "target": "com.amazonaws.wellarchitected#AccessDeniedException" },
+      { "target": "com.amazonaws.wellarchitected#ConflictException" },
+      { "target": "com.amazonaws.wellarchitected#InternalServerException" },
+      { "target": "com.amazonaws.wellarchitected#ResourceNotFoundException" },
+      { "target": "com.amazonaws.wellarchitected#ThrottlingException" },
+      { "target": "com.amazonaws.wellarchitected#ValidationException" }
+    ],
+    "traits": {
+      "smithy.api#documentation": "Disassociate a lens from a workload.\n Up to 10 lenses can be disassociated from a workload in a single API operation.\n \n The Amazon Web Services Well-Architected Framework lens (wellarchitected) cannot be\n removed from a workload.\n",
+      "smithy.api#http": {
+        "method": "PATCH",
+        "uri": "/workloads/{WorkloadId}/disassociateLenses",
+        "code": 200
+      }
+    }
+  },
   "com.amazonaws.wellarchitected#DisassociateLensesInput": {
     "type": "structure",
     "members": {
       "WorkloadId": {
@@ -998,6 +1365,15 @@
     "traits": {
       "smithy.api#documentation": "Input to disassociate lens reviews."
     }
   },
+  "com.amazonaws.wellarchitected#DisplayText": {
+    "type": "string",
+    "traits": {
+      "smithy.api#length": {
+        "min": 1,
+        "max": 64
+      }
+    }
+  },
   "com.amazonaws.wellarchitected#ExceptionMessage": {
     "type": "string",
     "traits": {
@@ -1016,6 +1392,70 @@
       "smithy.api#documentation": "Type of the resource affected."
     }
   },
+  "com.amazonaws.wellarchitected#ExportLens": {
+    "type": "operation",
+    "input": {
+      "target": "com.amazonaws.wellarchitected#ExportLensInput"
+    },
+    "output": {
+      "target": "com.amazonaws.wellarchitected#ExportLensOutput"
+    },
+    "errors": [
+      { "target": "com.amazonaws.wellarchitected#AccessDeniedException" },
+      { "target": "com.amazonaws.wellarchitected#InternalServerException" },
+      { "target": "com.amazonaws.wellarchitected#ResourceNotFoundException" },
+      { "target": "com.amazonaws.wellarchitected#ThrottlingException" },
+      { "target": "com.amazonaws.wellarchitected#ValidationException" }
+    ],
+    "traits": {
+      "smithy.api#documentation": "Export an existing lens.\n Lenses are defined in JSON. For more information, see JSON format specification \n in the Well-Architected Tool User Guide. Only the owner of a lens can export it.\n \n Disclaimer\n Do not include or gather personal identifiable information (PII) of end users or \n other identifiable individuals in or via your custom lenses. If your custom \n lens or those shared with you and used in your account do include or collect \n PII you are responsible for: ensuring that the included PII is processed in accordance \n with applicable law, providing adequate privacy notices, and obtaining necessary \n consents for processing such data.\n",
+      "smithy.api#http": {
+        "method": "GET",
+        "uri": "/lenses/{LensAlias}/export",
+        "code": 200
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#ExportLensInput": {
+    "type": "structure",
+    "members": {
+      "LensAlias": {
+        "target": "com.amazonaws.wellarchitected#LensAlias",
+        "traits": { "smithy.api#httpLabel": {}, "smithy.api#required": {} }
+      },
+      "LensVersion": {
+        "target": "com.amazonaws.wellarchitected#LensVersion",
+        "traits": {
+          "smithy.api#documentation": "The lens version to be exported.",
+          "smithy.api#httpQuery": "LensVersion"
+        }
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#ExportLensOutput": {
+    "type": "structure",
+    "members": {
+      "LensJSON": {
+        "target": "com.amazonaws.wellarchitected#LensJSON",
+        "traits": {
+          "smithy.api#documentation": "The JSON for the lens."
+        }
+      }
+    }
+  },
   "com.amazonaws.wellarchitected#GetAnswer": {
     "type": "operation",
     "input": {

@@ -1097,6 +1537,12 @@
       "LensAlias": {
         "target": "com.amazonaws.wellarchitected#LensAlias"
       },
+      "LensArn": {
+        "target": "com.amazonaws.wellarchitected#LensArn",
+        "traits": {
+          "smithy.api#documentation": "The ARN for the lens."
+        }
+      },
       "Answer": {
         "target": "com.amazonaws.wellarchitected#Answer"
       }
     },
     "traits": {
@@ -1105,6 +1551,70 @@
       "smithy.api#documentation": "Output of a get answer call."
     }
   },
+  "com.amazonaws.wellarchitected#GetLens": {
+    "type": "operation",
+    "input": {
+      "target": "com.amazonaws.wellarchitected#GetLensInput"
+    },
+    "output": {
+      "target": "com.amazonaws.wellarchitected#GetLensOutput"
+    },
+    "errors": [
+      { "target": "com.amazonaws.wellarchitected#AccessDeniedException" },
+      { "target": "com.amazonaws.wellarchitected#InternalServerException" },
+      { "target": "com.amazonaws.wellarchitected#ResourceNotFoundException" },
+      { "target": "com.amazonaws.wellarchitected#ThrottlingException" },
+      { "target": "com.amazonaws.wellarchitected#ValidationException" }
+    ],
+    "traits": {
+      "smithy.api#documentation": "Get an existing lens.",
+      "smithy.api#http": {
+        "method": "GET",
+        "uri": "/lenses/{LensAlias}",
+        "code": 200
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#GetLensInput": {
+    "type": "structure",
+    "members": {
+      "LensAlias": {
+        "target": "com.amazonaws.wellarchitected#LensAlias",
+        "traits": { "smithy.api#httpLabel": {}, "smithy.api#required": {} }
+      },
+      "LensVersion": {
+        "target": "com.amazonaws.wellarchitected#LensVersion",
+        "traits": {
+          "smithy.api#documentation": "The lens version to be retrieved.",
+          "smithy.api#httpQuery": "LensVersion"
+        }
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#GetLensOutput": {
+    "type": "structure",
+    "members": {
+      "Lens": {
+        "target": "com.amazonaws.wellarchitected#Lens",
+        "traits": {
+          "smithy.api#documentation": "A lens return object."
+        }
+      }
+    }
+  },
   "com.amazonaws.wellarchitected#GetLensReview": {
     "type": "operation",
     "input": {
@@ -1311,8 +1821,14 @@
       "BaseLensVersion": {
         "target": "com.amazonaws.wellarchitected#LensVersion",
         "traits": {
          "smithy.api#documentation": "The base version of the lens.",
-          "smithy.api#httpQuery": "BaseLensVersion",
-          "smithy.api#required": {}
+          "smithy.api#httpQuery": "BaseLensVersion"
+        }
+      },
+      "TargetLensVersion": {
+        "target": "com.amazonaws.wellarchitected#LensVersion",
+        "traits": {
+          "smithy.api#documentation": "The lens version to target a difference for.",
+          "smithy.api#httpQuery": "TargetLensVersion"
         }
       }
     }
@@ -1323,12 +1839,24 @@
     "LensAlias": {
       "target": "com.amazonaws.wellarchitected#LensAlias"
     },
+    "LensArn": {
+      "target": "com.amazonaws.wellarchitected#LensArn",
+      "traits": {
+        "smithy.api#documentation": "The ARN for the lens."
+      }
+    },
     "BaseLensVersion": {
       "target": "com.amazonaws.wellarchitected#LensVersion",
       "traits": {
         "smithy.api#documentation": "The base version of the lens."
       }
     },
+    "TargetLensVersion": {
+      "target": "com.amazonaws.wellarchitected#LensVersion",
+      "traits": {
+        "smithy.api#documentation": "The target lens version for the lens."
+      }
+    },
     "LatestLensVersion": {
       "target": "com.amazonaws.wellarchitected#LensVersion",
       "traits": {

@@ -1480,6 +2008,110 @@
       }
     }
   },
+  "com.amazonaws.wellarchitected#ImportLens": {
+    "type": "operation",
+    "input": {
+      "target": "com.amazonaws.wellarchitected#ImportLensInput"
+    },
+    "output": {
+      "target": "com.amazonaws.wellarchitected#ImportLensOutput"
+    },
+    "errors": [
+      { "target": "com.amazonaws.wellarchitected#AccessDeniedException" },
+      { "target": "com.amazonaws.wellarchitected#ConflictException" },
+      { "target": "com.amazonaws.wellarchitected#InternalServerException" },
+      { "target": "com.amazonaws.wellarchitected#ResourceNotFoundException" },
+      { "target": "com.amazonaws.wellarchitected#ServiceQuotaExceededException" },
+      { "target": "com.amazonaws.wellarchitected#ThrottlingException" },
+      { "target": "com.amazonaws.wellarchitected#ValidationException" }
+    ],
+    "traits": {
+      "smithy.api#documentation": "Import a new lens.\n The lens cannot be applied to workloads or shared with other Amazon Web Services accounts\n until it's published with CreateLensVersion\n Lenses are defined in JSON. For more information, see JSON format specification \n in the Well-Architected Tool User Guide.\n A custom lens cannot exceed 500 KB in size.\n \n Disclaimer\n Do not include or gather personal identifiable information (PII) of end users or \n other identifiable individuals in or via your custom lenses. If your custom \n lens or those shared with you and used in your account do include or collect \n PII you are responsible for: ensuring that the included PII is processed in accordance \n with applicable law, providing adequate privacy notices, and obtaining necessary \n consents for processing such data.\n",
+      "smithy.api#http": {
+        "method": "PUT",
+        "uri": "/importLens",
+        "code": 200
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#ImportLensInput": {
+    "type": "structure",
+    "members": {
+      "LensAlias": {
+        "target": "com.amazonaws.wellarchitected#LensAlias"
+      },
+      "JSONString": {
+        "target": "com.amazonaws.wellarchitected#LensJSON",
+        "traits": {
+          "smithy.api#documentation": "The JSON representation of a lens.",
+          "smithy.api#required": {}
+        }
+      },
+      "ClientRequestToken": {
+        "target": "com.amazonaws.wellarchitected#ClientRequestToken",
+        "traits": { "smithy.api#idempotencyToken": {}, "smithy.api#required": {} }
+      },
+      "Tags": {
+        "target": "com.amazonaws.wellarchitected#TagMap",
+        "traits": {
+          "smithy.api#documentation": "Tags to associate to a lens."
+        }
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#ImportLensOutput": {
+    "type": "structure",
+    "members": {
+      "LensArn": {
+        "target": "com.amazonaws.wellarchitected#LensArn",
+        "traits": {
+          "smithy.api#documentation": "The ARN for the lens."
+        }
+      },
+      "Status": {
+        "target": "com.amazonaws.wellarchitected#ImportLensStatus",
+        "traits": {
+          "smithy.api#documentation": "The status of the imported lens."
+        }
+      }
+    }
+  },
+  "com.amazonaws.wellarchitected#ImportLensStatus": {
+    "type": "string",
+    "traits": {
+      "smithy.api#enum": [
+        { "value": "IN_PROGRESS", "name": "IN_PROGRESS" },
+        { "value": "COMPLETE", "name": "COMPLETE" },
+        { "value": "ERROR", "name": "ERROR" }
+      ]
+    }
+  },
+  "com.amazonaws.wellarchitected#ImprovementPlanUrl": {
+    "type": "string",
+    "traits": {

                                                                      The improvement plan details.

                                                                      " + } } }, "traits": { @@ -1533,7 +2171,7 @@ } }, "traits": { - "smithy.api#documentation": "

                                                                      There is a problem with the AWS Well-Architected Tool API service.

                                                                      ", + "smithy.api#documentation": "

                                                                      There is a problem with the Well-Architected Tool API service.

                                                                      ", "smithy.api#error": "server", "smithy.api#httpError": 500 } @@ -1544,16 +2182,57 @@ "smithy.api#documentation": "

Defines whether this question is applicable to a lens review.
                                                                      " } }, + "com.amazonaws.wellarchitected#IsMajorVersion": { + "type": "boolean" + }, "com.amazonaws.wellarchitected#IsReviewOwnerUpdateAcknowledged": { "type": "boolean" }, + "com.amazonaws.wellarchitected#Lens": { + "type": "structure", + "members": { + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN of a lens.
                                                                      " + } + }, + "LensVersion": { + "target": "com.amazonaws.wellarchitected#LensVersion", + "traits": { + "smithy.api#documentation": "

The version of a lens.
                                                                      " + } + }, + "Name": { + "target": "com.amazonaws.wellarchitected#LensName" + }, + "Description": { + "target": "com.amazonaws.wellarchitected#LensDescription" + }, + "Owner": { + "target": "com.amazonaws.wellarchitected#LensOwner", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID that owns the lens.
                                                                      " + } + }, + "ShareInvitationId": { + "target": "com.amazonaws.wellarchitected#ShareInvitationId", + "traits": { + "smithy.api#documentation": "

The ID assigned to the share invitation.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

A lens return object.
                                                                      " + } + }, "com.amazonaws.wellarchitected#LensAlias": { "type": "string", "traits": { "smithy.api#documentation": "

The alias of the lens, for example, serverless. Each lens is identified by its LensSummary$LensAlias.
                                                                      ", "smithy.api#length": { "min": 1, - "max": 64 + "max": 128 } } }, @@ -1563,12 +2242,15 @@ "target": "com.amazonaws.wellarchitected#LensAlias" }, "traits": { - "smithy.api#documentation": "

List of lens aliases to associate or disassociate with a workload. Identify a lens using its LensSummary$LensAlias.
                                                                      ", + "smithy.api#documentation": "

List of lens aliases to associate or disassociate with a workload. Up to 10 lenses can be specified. Identify a lens using its LensSummary$LensAlias.
                                                                      ", "smithy.api#length": { "min": 1 } } }, + "com.amazonaws.wellarchitected#LensArn": { + "type": "string" + }, "com.amazonaws.wellarchitected#LensDescription": { "type": "string", "traits": { @@ -1579,6 +2261,15 @@ } } }, + "com.amazonaws.wellarchitected#LensJSON": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 500000 + } + } + }, "com.amazonaws.wellarchitected#LensName": { "type": "string", "traits": { @@ -1589,12 +2280,30 @@ } } }, + "com.amazonaws.wellarchitected#LensNamePrefix": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 100 + } + } + }, + "com.amazonaws.wellarchitected#LensOwner": { + "type": "string" + }, "com.amazonaws.wellarchitected#LensReview": { "type": "structure", "members": { "LensAlias": { "target": "com.amazonaws.wellarchitected#LensAlias" }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } + }, "LensVersion": { "target": "com.amazonaws.wellarchitected#LensVersion", "traits": { @@ -1636,6 +2345,12 @@ "LensAlias": { "target": "com.amazonaws.wellarchitected#LensAlias" }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } + }, "Base64String": { "target": "com.amazonaws.wellarchitected#Base64String" } @@ -1659,6 +2374,12 @@ "LensAlias": { "target": "com.amazonaws.wellarchitected#LensAlias" }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } + }, "LensVersion": { "target": "com.amazonaws.wellarchitected#LensVersion", "traits": { @@ -1674,15 +2395,38 @@ "smithy.api#documentation": "

The status of the lens.
                                                                      " } }, - "UpdatedAt": { - "target": "com.amazonaws.wellarchitected#Timestamp" + "UpdatedAt": { + "target": "com.amazonaws.wellarchitected#Timestamp" + }, + "RiskCounts": { + "target": "com.amazonaws.wellarchitected#RiskCounts" + } + }, + "traits": { + "smithy.api#documentation": "

A lens review summary of a workload.
                                                                      " + } + }, + "com.amazonaws.wellarchitected#LensShareSummaries": { + "type": "list", + "member": { + "target": "com.amazonaws.wellarchitected#LensShareSummary" + } + }, + "com.amazonaws.wellarchitected#LensShareSummary": { + "type": "structure", + "members": { + "ShareId": { + "target": "com.amazonaws.wellarchitected#ShareId" + }, + "SharedWith": { + "target": "com.amazonaws.wellarchitected#SharedWith" }, - "RiskCounts": { - "target": "com.amazonaws.wellarchitected#RiskCounts" + "Status": { + "target": "com.amazonaws.wellarchitected#ShareStatus" } }, "traits": { - "smithy.api#documentation": "

A lens review summary of a workload.
                                                                      " + "smithy.api#documentation": "

A lens share summary return object.
                                                                      " } }, "com.amazonaws.wellarchitected#LensStatus": { @@ -1700,6 +2444,33 @@ { "value": "DEPRECATED", "name": "DEPRECATED" + }, + { + "value": "DELETED", + "name": "DELETED" + }, + { + "value": "UNSHARED", + "name": "UNSHARED" + } + ] + } + }, + "com.amazonaws.wellarchitected#LensStatusType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "ALL", + "name": "ALL" + }, + { + "value": "DRAFT", + "name": "DRAFT" + }, + { + "value": "PUBLISHED", + "name": "PUBLISHED" } ] } @@ -1716,26 +2487,72 @@ "com.amazonaws.wellarchitected#LensSummary": { "type": "structure", "members": { + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN of the lens.
                                                                      " + } + }, "LensAlias": { "target": "com.amazonaws.wellarchitected#LensAlias" }, + "LensName": { + "target": "com.amazonaws.wellarchitected#LensName" + }, + "LensType": { + "target": "com.amazonaws.wellarchitected#LensType", + "traits": { + "smithy.api#documentation": "

The type of the lens.
                                                                      " + } + }, + "Description": { + "target": "com.amazonaws.wellarchitected#LensDescription" + }, + "CreatedAt": { + "target": "com.amazonaws.wellarchitected#Timestamp" + }, + "UpdatedAt": { + "target": "com.amazonaws.wellarchitected#Timestamp" + }, "LensVersion": { "target": "com.amazonaws.wellarchitected#LensVersion", "traits": { "smithy.api#documentation": "

The version of the lens.
                                                                      " } }, - "LensName": { - "target": "com.amazonaws.wellarchitected#LensName" + "Owner": { + "target": "com.amazonaws.wellarchitected#AwsAccountId" }, - "Description": { - "target": "com.amazonaws.wellarchitected#LensDescription" + "LensStatus": { + "target": "com.amazonaws.wellarchitected#LensStatus", + "traits": { + "smithy.api#documentation": "

The status of the lens.
                                                                      " + } } }, "traits": { "smithy.api#documentation": "

A lens summary of a lens.
                                                                      " } }, + "com.amazonaws.wellarchitected#LensType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AWS_OFFICIAL", + "name": "AWS_OFFICIAL" + }, + { + "value": "CUSTOM_SHARED", + "name": "CUSTOM_SHARED" + }, + { + "value": "CUSTOM_SELF", + "name": "CUSTOM_SELF" + } + ] + } + }, "com.amazonaws.wellarchitected#LensUpgradeSummary": { "type": "structure", "members": { @@ -1748,6 +2565,12 @@ "LensAlias": { "target": "com.amazonaws.wellarchitected#LensAlias" }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } + }, "CurrentLensVersion": { "target": "com.amazonaws.wellarchitected#LensVersion", "traits": { @@ -1770,7 +2593,7 @@ "traits": { "smithy.api#length": { "min": 1, - "max": 128 + "max": 32 } } }, @@ -1881,6 +2704,12 @@ "LensAlias": { "target": "com.amazonaws.wellarchitected#LensAlias" }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } + }, "AnswerSummaries": { "target": "com.amazonaws.wellarchitected#AnswerSummaries" }, @@ -1999,6 +2828,12 @@ "LensAlias": { "target": "com.amazonaws.wellarchitected#LensAlias" }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } + }, "ImprovementSummaries": { "target": "com.amazonaws.wellarchitected#ImprovementSummaries" }, @@ -2102,6 +2937,91 @@ "smithy.api#documentation": "

Output of a list lens reviews call.
                                                                      " } }, + "com.amazonaws.wellarchitected#ListLensShares": { + "type": "operation", + "input": { + "target": "com.amazonaws.wellarchitected#ListLensSharesInput" + }, + "output": { + "target": "com.amazonaws.wellarchitected#ListLensSharesOutput" + }, + "errors": [ + { + "target": "com.amazonaws.wellarchitected#AccessDeniedException" + }, + { + "target": "com.amazonaws.wellarchitected#InternalServerException" + }, + { + "target": "com.amazonaws.wellarchitected#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.wellarchitected#ThrottlingException" + }, + { + "target": "com.amazonaws.wellarchitected#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

List the lens shares associated with the lens.
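Illustrative only: a sketch of paging through lens shares with the generated client, assuming the ListLensShares command and the input/output members defined in this model land in @aws-sdk/client-wellarchitected; the lens alias and account prefix are placeholders.

// Assumes an ES module context (for top-level await).
import { WellArchitectedClient, ListLensSharesCommand } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});
let NextToken: string | undefined;
do {
  const page = await client.send(
    new ListLensSharesCommand({
      LensAlias: "example-custom-lens", // placeholder lens alias or ARN
      SharedWithPrefix: "1111",         // optional account / IAM role prefix filter
      MaxResults: 20,
      NextToken,
    })
  );
  for (const share of page.LensShareSummaries ?? []) {
    console.log(share.ShareId, share.SharedWith, share.Status);
  }
  NextToken = page.NextToken;
} while (NextToken);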
                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/lenses/{LensAlias}/shares", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "NextToken", + "outputToken": "NextToken", + "pageSize": "MaxResults" + } + } + }, + "com.amazonaws.wellarchitected#ListLensSharesInput": { + "type": "structure", + "members": { + "LensAlias": { + "target": "com.amazonaws.wellarchitected#LensAlias", + "traits": { + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "SharedWithPrefix": { + "target": "com.amazonaws.wellarchitected#SharedWithPrefix", + "traits": { + "smithy.api#documentation": "

The Amazon Web Services account ID or IAM role with which the lens is shared.
                                                                      ", + "smithy.api#httpQuery": "SharedWithPrefix" + } + }, + "NextToken": { + "target": "com.amazonaws.wellarchitected#NextToken", + "traits": { + "smithy.api#httpQuery": "NextToken" + } + }, + "MaxResults": { + "target": "com.amazonaws.wellarchitected#ListWorkloadSharesMaxResults", + "traits": { + "smithy.api#documentation": "

The maximum number of results to return for this request.
                                                                      ", + "smithy.api#httpQuery": "MaxResults" + } + } + } + }, + "com.amazonaws.wellarchitected#ListLensSharesOutput": { + "type": "structure", + "members": { + "LensShareSummaries": { + "target": "com.amazonaws.wellarchitected#LensShareSummaries", + "traits": { + "smithy.api#documentation": "

A list of lens share summaries.
                                                                      " + } + }, + "NextToken": { + "target": "com.amazonaws.wellarchitected#NextToken" + } + } + }, "com.amazonaws.wellarchitected#ListLenses": { "type": "operation", "input": { @@ -2152,6 +3072,26 @@ "traits": { "smithy.api#httpQuery": "MaxResults" } + }, + "LensType": { + "target": "com.amazonaws.wellarchitected#LensType", + "traits": { + "smithy.api#documentation": "

The type of lenses to be returned.
                                                                      ", + "smithy.api#httpQuery": "LensType" + } + }, + "LensStatus": { + "target": "com.amazonaws.wellarchitected#LensStatusType", + "traits": { + "smithy.api#documentation": "

The status of lenses to be returned.
                                                                      ", + "smithy.api#httpQuery": "LensStatus" + } + }, + "LensName": { + "target": "com.amazonaws.wellarchitected#LensName", + "traits": { + "smithy.api#httpQuery": "LensName" + } } }, "traits": { @@ -2370,6 +3310,20 @@ "smithy.api#httpQuery": "WorkloadNamePrefix" } }, + "LensNamePrefix": { + "target": "com.amazonaws.wellarchitected#LensNamePrefix", + "traits": { + "smithy.api#documentation": "

An optional string added to the beginning of each lens name returned in the results.
                                                                      ", + "smithy.api#httpQuery": "LensNamePrefix" + } + }, + "ShareResourceType": { + "target": "com.amazonaws.wellarchitected#ShareResourceType", + "traits": { + "smithy.api#documentation": "

The type of share invitations to be returned.
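Illustrative only: a sketch of listing pending lens share invitations with the new ShareResourceType and LensNamePrefix filters. The ListShareInvitations command name and the ShareInvitationSummaries output member are assumed from the generated @aws-sdk/client-wellarchitected client; the prefix is a placeholder.

// Assumes an ES module context (for top-level await).
import { WellArchitectedClient, ListShareInvitationsCommand } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({});
const out = await client.send(
  new ListShareInvitationsCommand({
    ShareResourceType: "LENS", // only lens invitations; WORKLOAD is the other value
    LensNamePrefix: "example", // placeholder prefix
  })
);
for (const invite of out.ShareInvitationSummaries ?? []) {
  console.log(invite.ShareInvitationId, invite.LensName, invite.LensArn);
}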
                                                                      ", + "smithy.api#httpQuery": "ShareResourceType" + } + }, "NextToken": { "target": "com.amazonaws.wellarchitected#NextToken", "traits": { @@ -2514,7 +3468,7 @@ "SharedWithPrefix": { "target": "com.amazonaws.wellarchitected#SharedWithPrefix", "traits": { - "smithy.api#documentation": "

The AWS account ID or IAM role with which the workload is shared.
                                                                      ", + "smithy.api#documentation": "

The Amazon Web Services account ID or IAM role with which the workload is shared.
                                                                      ", "smithy.api#httpQuery": "SharedWithPrefix" } }, @@ -2799,6 +3753,9 @@ "PillarId": { "target": "com.amazonaws.wellarchitected#PillarId" }, + "PillarName": { + "target": "com.amazonaws.wellarchitected#PillarName" + }, "DifferenceStatus": { "target": "com.amazonaws.wellarchitected#DifferenceStatus", "traits": { @@ -3078,8 +4035,23 @@ "smithy.api#documentation": "

The ID assigned to the share invitation.
                                                                      " } }, + "ShareResourceType": { + "target": "com.amazonaws.wellarchitected#ShareResourceType", + "traits": { + "smithy.api#documentation": "

The resource type of the share invitation.
                                                                      " + } + }, "WorkloadId": { "target": "com.amazonaws.wellarchitected#WorkloadId" + }, + "LensAlias": { + "target": "com.amazonaws.wellarchitected#LensAlias" + }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } } }, "traits": { @@ -3132,17 +4104,47 @@ "PermissionType": { "target": "com.amazonaws.wellarchitected#PermissionType" }, + "ShareResourceType": { + "target": "com.amazonaws.wellarchitected#ShareResourceType", + "traits": { + "smithy.api#documentation": "

The resource type of the share invitation.
                                                                      " + } + }, "WorkloadName": { "target": "com.amazonaws.wellarchitected#WorkloadName" }, "WorkloadId": { "target": "com.amazonaws.wellarchitected#WorkloadId" + }, + "LensName": { + "target": "com.amazonaws.wellarchitected#LensName" + }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } } }, "traits": { "smithy.api#documentation": "

A share invitation summary return object.
                                                                      " } }, + "com.amazonaws.wellarchitected#ShareResourceType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "WORKLOAD", + "name": "WORKLOAD" + }, + { + "value": "LENS", + "name": "LENS" + } + ] + } + }, "com.amazonaws.wellarchitected#ShareStatus": { "type": "string", "traits": { @@ -3174,7 +4176,7 @@ "com.amazonaws.wellarchitected#SharedWith": { "type": "string", "traits": { - "smithy.api#documentation": "

The AWS account ID or IAM role with which the workload is shared.
                                                                      ", + "smithy.api#documentation": "

The Amazon Web Services account ID or IAM role with which the workload is shared.
                                                                      ", "smithy.api#length": { "min": 12, "max": 2048 @@ -3456,6 +4458,12 @@ "LensAlias": { "target": "com.amazonaws.wellarchitected#LensAlias" }, + "LensArn": { + "target": "com.amazonaws.wellarchitected#LensArn", + "traits": { + "smithy.api#documentation": "

The ARN for the lens.
                                                                      " + } + }, "Answer": { "target": "com.amazonaws.wellarchitected#Answer" } @@ -3959,11 +4967,32 @@ }, "com.amazonaws.wellarchitected#WellArchitectedApiServiceLambda": { "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "WellArchitected", + "arnNamespace": "wellarchitected", + "cloudFormationName": "WellArchitected", + "cloudTrailEventSource": "wellarchitected.amazonaws.com", + "endpointPrefix": "wellarchitected" + }, + "aws.auth#sigv4": { + "name": "wellarchitected" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "Well-Architected Tool\n \n

This is the Well-Architected Tool API Reference. The WA Tool API provides programmatic access to the Well-Architected Tool in the Amazon Web Services Management Console. For information about the Well-Architected Tool, see the Well-Architected Tool User Guide.
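Illustrative only: a sketch of constructing the Well-Architected client and using the ListLenses filters (LensType, LensStatus) added in this model update; package, command, and output member names are assumed from the generated @aws-sdk/client-wellarchitected client.

// Assumes an ES module context (for top-level await).
import { WellArchitectedClient, ListLensesCommand } from "@aws-sdk/client-wellarchitected";

const client = new WellArchitectedClient({ region: "us-west-2" });
const { LensSummaries } = await client.send(
  new ListLensesCommand({ LensType: "CUSTOM_SELF", LensStatus: "PUBLISHED" })
);
for (const lens of LensSummaries ?? []) {
  console.log(lens.LensArn, lens.LensName, lens.LensStatus);
}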
                                                                      ", + "smithy.api#title": "AWS Well-Architected Tool" + }, "version": "2020-03-31", "operations": [ { "target": "com.amazonaws.wellarchitected#AssociateLenses" }, + { + "target": "com.amazonaws.wellarchitected#CreateLensShare" + }, + { + "target": "com.amazonaws.wellarchitected#CreateLensVersion" + }, { "target": "com.amazonaws.wellarchitected#CreateMilestone" }, @@ -3973,6 +5002,12 @@ { "target": "com.amazonaws.wellarchitected#CreateWorkloadShare" }, + { + "target": "com.amazonaws.wellarchitected#DeleteLens" + }, + { + "target": "com.amazonaws.wellarchitected#DeleteLensShare" + }, { "target": "com.amazonaws.wellarchitected#DeleteWorkload" }, @@ -3982,9 +5017,15 @@ { "target": "com.amazonaws.wellarchitected#DisassociateLenses" }, + { + "target": "com.amazonaws.wellarchitected#ExportLens" + }, { "target": "com.amazonaws.wellarchitected#GetAnswer" }, + { + "target": "com.amazonaws.wellarchitected#GetLens" + }, { "target": "com.amazonaws.wellarchitected#GetLensReview" }, @@ -4000,6 +5041,9 @@ { "target": "com.amazonaws.wellarchitected#GetWorkload" }, + { + "target": "com.amazonaws.wellarchitected#ImportLens" + }, { "target": "com.amazonaws.wellarchitected#ListAnswers" }, @@ -4012,6 +5056,9 @@ { "target": "com.amazonaws.wellarchitected#ListLensReviews" }, + { + "target": "com.amazonaws.wellarchitected#ListLensShares" + }, { "target": "com.amazonaws.wellarchitected#ListMilestones" }, @@ -4054,22 +5101,7 @@ { "target": "com.amazonaws.wellarchitected#UpgradeLensReview" } - ], - "traits": { - "aws.api#service": { - "sdkId": "WellArchitected", - "arnNamespace": "wellarchitected", - "cloudFormationName": "WellArchitected", - "cloudTrailEventSource": "wellarchitected.amazonaws.com", - "endpointPrefix": "wellarchitected" - }, - "aws.auth#sigv4": { - "name": "wellarchitected" - }, - "aws.protocols#restJson1": {}, - "smithy.api#documentation": "AWS Well-Architected Tool\n \n

This is the AWS Well-Architected Tool API Reference. The AWS Well-Architected Tool API provides programmatic access to the AWS Well-Architected Tool in the AWS Management Console. For information about the AWS Well-Architected Tool, see the AWS Well-Architected Tool User Guide.
                                                                      ", - "smithy.api#title": "AWS Well-Architected Tool" - } + ] }, "com.amazonaws.wellarchitected#Workload": { "type": "structure", @@ -4163,7 +5195,7 @@ "target": "com.amazonaws.wellarchitected#AwsAccountId" }, "traits": { - "smithy.api#documentation": "

The list of AWS account IDs associated with the workload.
                                                                      ", + "smithy.api#documentation": "

The list of Amazon Web Services account IDs associated with the workload.
                                                                      ", "smithy.api#length": { "min": 0, "max": 100 @@ -4192,7 +5224,7 @@ "target": "com.amazonaws.wellarchitected#AwsRegion" }, "traits": { - "smithy.api#documentation": "

The list of AWS Regions associated with the workload, for example, us-east-2, or ca-central-1.
                                                                      ", + "smithy.api#documentation": "

The list of Amazon Web Services Regions associated with the workload, for example, us-east-2, or ca-central-1.
                                                                      ", "smithy.api#length": { "min": 0, "max": 50 @@ -4228,7 +5260,7 @@ "com.amazonaws.wellarchitected#WorkloadId": { "type": "string", "traits": { - "smithy.api#documentation": "

The ID assigned to the workload. This ID is unique within an AWS Region.
                                                                      ", + "smithy.api#documentation": "

The ID assigned to the workload. This ID is unique within an Amazon Web Services Region.
                                                                      ", "smithy.api#pattern": "^[0-9a-f]{32}$" } }, @@ -4292,7 +5324,7 @@ "com.amazonaws.wellarchitected#WorkloadName": { "type": "string", "traits": { - "smithy.api#documentation": "

The name of the workload. The name must be unique within an account within a Region. Spaces and capitalization are ignored when checking for uniqueness.
                                                                      ", + "smithy.api#documentation": "

The name of the workload. The name must be unique within an account within an Amazon Web Services Region. Spaces and capitalization are ignored when checking for uniqueness.
                                                                      ", "smithy.api#length": { "min": 3, "max": 100 @@ -4324,7 +5356,7 @@ "target": "com.amazonaws.wellarchitected#WorkloadNonAwsRegion" }, "traits": { - "smithy.api#documentation": "

The list of non-AWS Regions associated with the workload.
                                                                      ", + "smithy.api#documentation": "

The list of non-Amazon Web Services Regions associated with the workload.
                                                                      ", "smithy.api#length": { "min": 0, "max": 5 diff --git a/codegen/sdk-codegen/aws-models/workspaces-web.json b/codegen/sdk-codegen/aws-models/workspaces-web.json new file mode 100644 index 000000000000..c76a7d612cad --- /dev/null +++ b/codegen/sdk-codegen/aws-models/workspaces-web.json @@ -0,0 +1,4446 @@ +{ + "smithy": "1.0", + "shapes": { + "com.amazonaws.workspacesweb#ARN": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:[\\w+=\\/,.@-]+:[a-zA-Z0-9\\-]+:[a-zA-Z0-9\\-]*:[a-zA-Z0-9]{1,12}:[a-zA-Z]+(\\/[a-fA-F0-9\\-]{36})+$" + } + }, + "com.amazonaws.workspacesweb#AWSErmineControlPlaneService": { + "type": "service", + "traits": { + "aws.api#service": { + "sdkId": "WorkSpaces Web", + "arnNamespace": "workspaces-web", + "cloudFormationName": "WorkSpacesWeb", + "cloudTrailEventSource": "workspaces-web.amazonaws.com", + "endpointPrefix": "workspaces-web" + }, + "aws.auth#sigv4": { + "name": "workspaces-web" + }, + "aws.protocols#restJson1": {}, + "smithy.api#documentation": "

WorkSpaces Web is a low cost, fully managed WorkSpace built specifically to facilitate secure, web-based workloads. WorkSpaces Web makes it easy for customers to safely provide their employees with access to internal websites and SaaS web applications without the administrative burden of appliances or specialized client software. WorkSpaces Web provides simple policy tools tailored for user interactions, while offloading common tasks like capacity management, scaling, and maintaining browser images.
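Illustrative only: a sketch of constructing the new WorkSpaces Web client and issuing a simple call. ListPortals is taken from the operation list below; its request and response shapes are not shown in this excerpt, so the empty input and the raw console.log avoid assuming member names.

// Assumes an ES module context (for top-level await).
import { WorkSpacesWebClient, ListPortalsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-east-1" });
const out = await client.send(new ListPortalsCommand({}));
console.log(out); // inspect the portal summaries returned for the account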
                                                                      ", + "smithy.api#title": "Amazon WorkSpaces Web" + }, + "version": "2020-07-08", + "operations": [ + { + "target": "com.amazonaws.workspacesweb#AssociateBrowserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#AssociateNetworkSettings" + }, + { + "target": "com.amazonaws.workspacesweb#AssociateTrustStore" + }, + { + "target": "com.amazonaws.workspacesweb#AssociateUserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#CreateBrowserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#CreateIdentityProvider" + }, + { + "target": "com.amazonaws.workspacesweb#CreateNetworkSettings" + }, + { + "target": "com.amazonaws.workspacesweb#CreatePortal" + }, + { + "target": "com.amazonaws.workspacesweb#CreateTrustStore" + }, + { + "target": "com.amazonaws.workspacesweb#CreateUserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#DeleteBrowserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#DeleteIdentityProvider" + }, + { + "target": "com.amazonaws.workspacesweb#DeleteNetworkSettings" + }, + { + "target": "com.amazonaws.workspacesweb#DeletePortal" + }, + { + "target": "com.amazonaws.workspacesweb#DeleteTrustStore" + }, + { + "target": "com.amazonaws.workspacesweb#DeleteUserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#DisassociateBrowserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#DisassociateNetworkSettings" + }, + { + "target": "com.amazonaws.workspacesweb#DisassociateTrustStore" + }, + { + "target": "com.amazonaws.workspacesweb#DisassociateUserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#GetBrowserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#GetIdentityProvider" + }, + { + "target": "com.amazonaws.workspacesweb#GetNetworkSettings" + }, + { + "target": "com.amazonaws.workspacesweb#GetPortal" + }, + { + "target": "com.amazonaws.workspacesweb#GetPortalServiceProviderMetadata" + }, + { + "target": "com.amazonaws.workspacesweb#GetTrustStore" + }, + { + "target": "com.amazonaws.workspacesweb#GetTrustStoreCertificate" + }, + { + "target": "com.amazonaws.workspacesweb#GetUserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#ListBrowserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#ListIdentityProviders" + }, + { + "target": "com.amazonaws.workspacesweb#ListNetworkSettings" + }, + { + "target": "com.amazonaws.workspacesweb#ListPortals" + }, + { + "target": "com.amazonaws.workspacesweb#ListTagsForResource" + }, + { + "target": "com.amazonaws.workspacesweb#ListTrustStoreCertificates" + }, + { + "target": "com.amazonaws.workspacesweb#ListTrustStores" + }, + { + "target": "com.amazonaws.workspacesweb#ListUserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#TagResource" + }, + { + "target": "com.amazonaws.workspacesweb#UntagResource" + }, + { + "target": "com.amazonaws.workspacesweb#UpdateBrowserSettings" + }, + { + "target": "com.amazonaws.workspacesweb#UpdateIdentityProvider" + }, + { + "target": "com.amazonaws.workspacesweb#UpdateNetworkSettings" + }, + { + "target": "com.amazonaws.workspacesweb#UpdatePortal" + }, + { + "target": "com.amazonaws.workspacesweb#UpdateTrustStore" + }, + { + "target": "com.amazonaws.workspacesweb#UpdateUserSettings" + } + ] + }, + "com.amazonaws.workspacesweb#AccessDeniedException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspacesweb#ExceptionMessage" + } + }, + "traits": { + "smithy.api#documentation": "

Access is denied.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 403 + } + }, + "com.amazonaws.workspacesweb#ArnList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#ARN" + } + }, + "com.amazonaws.workspacesweb#AssociateBrowserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#AssociateBrowserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#AssociateBrowserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates a browser settings resource with a web portal.
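Illustrative only: a sketch of associating an existing browser settings resource with a web portal using the request and response members defined below; both ARNs are placeholders and the client package name is assumed to be @aws-sdk/client-workspaces-web.

// Assumes an ES module context (for top-level await).
import { WorkSpacesWebClient, AssociateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({});
const out = await client.send(
  new AssociateBrowserSettingsCommand({
    portalArn: "arn:aws:workspaces-web:us-east-1:111122223333:portal/00000000-0000-0000-0000-000000000000", // placeholder
    browserSettingsArn: "arn:aws:workspaces-web:us-east-1:111122223333:browserSettings/00000000-0000-0000-0000-000000000000", // placeholder
  })
);
console.log(out.portalArn, out.browserSettingsArn);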
                                                                      ", + "smithy.api#http": { + "method": "PUT", + "uri": "/portals/{portalArn+}/browserSettings", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#AssociateBrowserSettingsRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the browser settings.
                                                                      ", + "smithy.api#httpQuery": "browserSettingsArn", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#AssociateBrowserSettingsResponse": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.
                                                                      ", + "smithy.api#required": {} + } + }, + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the browser settings.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#AssociateNetworkSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#AssociateNetworkSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#AssociateNetworkSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates a network settings resource with a web portal.
                                                                      ", + "smithy.api#http": { + "method": "PUT", + "uri": "/portals/{portalArn+}/networkSettings", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#AssociateNetworkSettingsRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the network settings.
                                                                      ", + "smithy.api#httpQuery": "networkSettingsArn", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#AssociateNetworkSettingsResponse": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.
                                                                      ", + "smithy.api#required": {} + } + }, + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the network settings.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#AssociateTrustStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#AssociateTrustStoreRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#AssociateTrustStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates a trust store with a web portal.
                                                                      ", + "smithy.api#http": { + "method": "PUT", + "uri": "/portals/{portalArn+}/trustStores", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#AssociateTrustStoreRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the trust store.
                                                                      ", + "smithy.api#httpQuery": "trustStoreArn", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#AssociateTrustStoreResponse": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.
                                                                      ", + "smithy.api#required": {} + } + }, + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the trust store.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#AssociateUserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#AssociateUserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#AssociateUserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Associates a user settings resource with a web portal.
                                                                      ", + "smithy.api#http": { + "method": "PUT", + "uri": "/portals/{portalArn+}/userSettings", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#AssociateUserSettingsRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the user settings.
                                                                      ", + "smithy.api#httpQuery": "userSettingsArn", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#AssociateUserSettingsResponse": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the web portal.
                                                                      ", + "smithy.api#required": {} + } + }, + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the user settings.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#BrowserPolicy": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 2, + "max": 131072 + }, + "smithy.api#pattern": "\\{[\\S\\s]*\\}\\s*" + } + }, + "com.amazonaws.workspacesweb#BrowserSettings": { + "type": "structure", + "members": { + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the browser settings.
                                                                      ", + "smithy.api#required": {} + } + }, + "associatedPortalArns": { + "target": "com.amazonaws.workspacesweb#ArnList", + "traits": { + "smithy.api#documentation": "

A list of web portal ARNs that this browser settings is associated with.
                                                                      " + } + }, + "browserPolicy": { + "target": "com.amazonaws.workspacesweb#BrowserPolicy", + "traits": { + "smithy.api#documentation": "

A JSON string containing Chrome Enterprise policies that will be applied to all streaming sessions.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The browser settings resource that can be associated with a web portal. Once associated with a web portal, browser settings control how the browser will behave once a user starts a streaming session for the web portal.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#BrowserSettingsList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#BrowserSettingsSummary" + } + }, + "com.amazonaws.workspacesweb#BrowserSettingsSummary": { + "type": "structure", + "members": { + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

The ARN of the browser settings.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary for browser settings.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#BrowserType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Chrome", + "name": "CHROME" + } + ] + } + }, + "com.amazonaws.workspacesweb#Certificate": { + "type": "structure", + "members": { + "thumbprint": { + "target": "com.amazonaws.workspacesweb#CertificateThumbprint", + "traits": { + "smithy.api#documentation": "

A hexadecimal identifier for the certificate.
                                                                      " + } + }, + "subject": { + "target": "com.amazonaws.workspacesweb#CertificatePrincipal", + "traits": { + "smithy.api#documentation": "

The entity the certificate belongs to.
                                                                      " + } + }, + "issuer": { + "target": "com.amazonaws.workspacesweb#CertificatePrincipal", + "traits": { + "smithy.api#documentation": "

The entity that issued the certificate.
                                                                      " + } + }, + "notValidBefore": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

The certificate is not valid before this date.
                                                                      " + } + }, + "notValidAfter": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

The certificate is not valid after this date.
                                                                      " + } + }, + "body": { + "target": "com.amazonaws.workspacesweb#CertificateAuthorityBody", + "traits": { + "smithy.api#documentation": "

The body of the certificate.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The certificate.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#CertificateAuthorityBody": { + "type": "blob" + }, + "com.amazonaws.workspacesweb#CertificateList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#CertificateAuthorityBody" + } + }, + "com.amazonaws.workspacesweb#CertificatePrincipal": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 256 + }, + "smithy.api#pattern": "^\\S+$" + } + }, + "com.amazonaws.workspacesweb#CertificateSummary": { + "type": "structure", + "members": { + "thumbprint": { + "target": "com.amazonaws.workspacesweb#CertificateThumbprint", + "traits": { + "smithy.api#documentation": "

A hexadecimal identifier for the certificate.
                                                                      " + } + }, + "subject": { + "target": "com.amazonaws.workspacesweb#CertificatePrincipal", + "traits": { + "smithy.api#documentation": "

The entity the certificate belongs to.
                                                                      " + } + }, + "issuer": { + "target": "com.amazonaws.workspacesweb#CertificatePrincipal", + "traits": { + "smithy.api#documentation": "

The entity that issued the certificate.
                                                                      " + } + }, + "notValidBefore": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

The certificate is not valid before this date.
                                                                      " + } + }, + "notValidAfter": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

The certificate is not valid after this date.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

The summary of the certificate.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#CertificateSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#CertificateSummary" + } + }, + "com.amazonaws.workspacesweb#CertificateThumbprint": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 64, + "max": 64 + }, + "smithy.api#pattern": "^[A-Fa-f0-9]{64}$" + } + }, + "com.amazonaws.workspacesweb#CertificateThumbprintList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#CertificateThumbprint" + } + }, + "com.amazonaws.workspacesweb#ClientToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 512 + } + } + }, + "com.amazonaws.workspacesweb#ConflictException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspacesweb#ExceptionMessage" + }, + "resourceId": { + "target": "com.amazonaws.workspacesweb#ResourceId", + "traits": { + "smithy.api#documentation": "

Identifier of the resource affected.
                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.workspacesweb#ResourceType", + "traits": { + "smithy.api#documentation": "

Type of the resource affected.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

There is a conflict.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 409 + } + }, + "com.amazonaws.workspacesweb#CreateBrowserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#CreateBrowserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#CreateBrowserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a browser settings resource that can be associated with a web portal. Once associated with a web portal, browser settings control how the browser will behave once a user starts a streaming session for the web portal.
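Illustrative only: a sketch of creating a browser settings resource from a Chrome Enterprise policy document, using the CreateBrowserSettingsRequest members defined below. The policy content is a minimal placeholder; tags and customer managed keys are omitted, and the package name is assumed to be @aws-sdk/client-workspaces-web.

// Assumes an ES module context (for top-level await).
import { WorkSpacesWebClient, CreateBrowserSettingsCommand } from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({});
const { browserSettingsArn } = await client.send(
  new CreateBrowserSettingsCommand({
    // Placeholder policy JSON; a real value must satisfy the BrowserPolicy pattern.
    browserPolicy: JSON.stringify({ chromePolicies: { RestoreOnStartup: { value: 4 } } }),
    // Optional idempotency token; the SDK fills one in if omitted.
    clientToken: "create-browser-settings-001",
  })
);
console.log(browserSettingsArn);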
                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/browserSettings", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#CreateBrowserSettingsRequest": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.workspacesweb#TagList", + "traits": { + "smithy.api#documentation": "

The tags to add to the browser settings resource. A tag is a key-value pair.
                                                                      " + } + }, + "customerManagedKey": { + "target": "com.amazonaws.workspacesweb#keyArn", + "traits": { + "smithy.api#documentation": "

The custom managed key of the browser settings.
                                                                      " + } + }, + "additionalEncryptionContext": { + "target": "com.amazonaws.workspacesweb#EncryptionContextMap", + "traits": { + "smithy.api#documentation": "

Additional encryption context of the browser settings.
                                                                      " + } + }, + "browserPolicy": { + "target": "com.amazonaws.workspacesweb#BrowserPolicy", + "traits": { + "smithy.api#documentation": "

A JSON string containing Chrome Enterprise policies that will be applied to all streaming sessions.
                                                                      ", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token returns the result from the original successful request. If you do not specify a client token, one is automatically generated by the AWS SDK.
                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateBrowserSettingsResponse": { + "type": "structure", + "members": { + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the browser settings.
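For orientation, here is a minimal sketch of calling this operation from the generated TypeScript client, assuming the standard AWS SDK for JavaScript v3 command pattern and a package name of @aws-sdk/client-workspaces-web; the region, policy content, and tag values are illustrative placeholders.

```ts
import {
  WorkSpacesWebClient,
  CreateBrowserSettingsCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

export async function createBrowserSettings(): Promise<string | undefined> {
  // browserPolicy is a JSON string of Chrome Enterprise policies applied to
  // every streaming session of the portal this resource is attached to.
  const browserPolicy = JSON.stringify({
    chromePolicies: {
      IncognitoModeAvailability: { value: 1 }, // illustrative policy only
    },
  });

  const { browserSettingsArn } = await client.send(
    new CreateBrowserSettingsCommand({
      browserPolicy,
      tags: [{ Key: "project", Value: "demo" }], // Key/Value casing assumed from the Tag shape
      // clientToken omitted: the SDK generates one, making retries idempotent.
    })
  );
  return browserSettingsArn;
}
```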

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateIdentityProvider": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#CreateIdentityProviderRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#CreateIdentityProviderResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates an identity provider resource that is then associated with a web portal.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/identityProviders", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#CreateIdentityProviderRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#required": {} + } + }, + "identityProviderName": { + "target": "com.amazonaws.workspacesweb#IdentityProviderName", + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider name.

                                                                      ", + "smithy.api#required": {} + } + }, + "identityProviderType": { + "target": "com.amazonaws.workspacesweb#IdentityProviderType", + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider type.

                                                                      ", + "smithy.api#required": {} + } + }, + "identityProviderDetails": { + "target": "com.amazonaws.workspacesweb#IdentityProviderDetails", + "traits": { + "smithy.api#documentation": "

The identity provider details. The following list describes the provider detail keys for each identity provider type (an OIDC example appears after the CreateIdentityProvider response below).
• For Google and Login with Amazon: client_id, client_secret, authorize_scopes
• For Facebook: client_id, client_secret, authorize_scopes, api_version
• For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
• For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, plus authorize_url, token_url, attributes_url, and jwks_uri if they are not available from the discovery URL specified by the oidc_issuer key
• For SAML providers: MetadataFile OR MetadataURL, and optionally IDPSignout
                                                                      ", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      \n

                                                                      If you do not specify a client token, one is automatically generated by the AWS\n SDK.

                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateIdentityProviderResponse": { + "type": "structure", + "members": { + "identityProviderArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the identity provider.
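As promised above, a sketch of supplying identityProviderDetails for an OIDC provider, again assuming the generated @aws-sdk/client-workspaces-web client; the issuer, client ID, secret, and the "OIDC" type string are placeholders derived from the key list rather than verified enum values.

```ts
import {
  WorkSpacesWebClient,
  CreateIdentityProviderCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

export async function addOidcProvider(portalArn: string): Promise<string | undefined> {
  const { identityProviderArn } = await client.send(
    new CreateIdentityProviderCommand({
      portalArn,
      identityProviderName: "corp-oidc",
      identityProviderType: "OIDC", // assumed enum value for OIDC providers
      // Keys follow the OIDC entry in the provider-details list above.
      identityProviderDetails: {
        client_id: "example-client-id",
        client_secret: "example-client-secret",
        attributes_request_method: "GET",
        oidc_issuer: "https://idp.example.com",
        authorize_scopes: "openid email",
        // authorize_url, token_url, attributes_url, and jwks_uri are only
        // needed when the issuer's discovery URL does not supply them.
      },
    })
  );
  return identityProviderArn;
}
```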

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateNetworkSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#CreateNetworkSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#CreateNetworkSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a network settings resource that can be associated with a web portal. Once\n associated with a web portal, network settings define how streaming instances will connect\n with your specified VPC.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/networkSettings", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#CreateNetworkSettingsRequest": { + "type": "structure", + "members": { + "vpcId": { + "target": "com.amazonaws.workspacesweb#VpcId", + "traits": { + "smithy.api#documentation": "

                                                                      The VPC that streaming instances will connect to.

                                                                      ", + "smithy.api#required": {} + } + }, + "subnetIds": { + "target": "com.amazonaws.workspacesweb#SubnetIdList", + "traits": { + "smithy.api#documentation": "

                                                                      The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different availability zones.

                                                                      ", + "smithy.api#required": {} + } + }, + "securityGroupIds": { + "target": "com.amazonaws.workspacesweb#SecurityGroupIdList", + "traits": { + "smithy.api#documentation": "

                                                                      One or more security groups used to control access from streaming instances to your VPC.

                                                                      ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.workspacesweb#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      The tags to add to the network settings resource. A tag is a key-value pair.

                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      \n

                                                                      If you do not specify a client token, one is automatically generated by the AWS\n SDK.

                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateNetworkSettingsResponse": { + "type": "structure", + "members": { + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the network settings.
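A sketch of the corresponding client call, assuming the generated @aws-sdk/client-workspaces-web package; the VPC, subnet, and security group IDs are placeholders, and per the request description the two subnets are expected to be in different Availability Zones.

```ts
import {
  WorkSpacesWebClient,
  CreateNetworkSettingsCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

export async function createNetworkSettings(): Promise<string | undefined> {
  const { networkSettingsArn } = await client.send(
    new CreateNetworkSettingsCommand({
      vpcId: "vpc-0123456789abcdef0",
      // At least two subnets, in different Availability Zones.
      subnetIds: ["subnet-0aaa1111bbb22222c", "subnet-0ddd3333eee44444f"],
      securityGroupIds: ["sg-0123456789abcdef0"],
    })
  );
  return networkSettingsArn;
}
```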

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreatePortal": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#CreatePortalRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#CreatePortalResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a web portal.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/portals", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#CreatePortalRequest": { + "type": "structure", + "members": { + "displayName": { + "target": "com.amazonaws.workspacesweb#DisplayName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the web portal. This is not visible to users who log into the web portal.

                                                                      " + } + }, + "tags": { + "target": "com.amazonaws.workspacesweb#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      The tags to add to the web portal. A tag is a key-value pair.

                                                                      " + } + }, + "customerManagedKey": { + "target": "com.amazonaws.workspacesweb#keyArn", + "traits": { + "smithy.api#documentation": "

                                                                      The customer managed key of the web portal.

                                                                      " + } + }, + "additionalEncryptionContext": { + "target": "com.amazonaws.workspacesweb#EncryptionContextMap", + "traits": { + "smithy.api#documentation": "

                                                                      The additional encryption context of the portal.

                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      \n

                                                                      If you do not specify a client token, one is automatically generated by the AWS\n SDK.

                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreatePortalResponse": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#required": {} + } + }, + "portalEndpoint": { + "target": "com.amazonaws.workspacesweb#PortalEndpoint", + "traits": { + "smithy.api#documentation": "

                                                                      The endpoint URL of the web portal that users access in order to start streaming sessions.
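A sketch of creating a portal, under the same assumptions about the generated client; the display name is illustrative, and the returned portalEndpoint is the URL users open to start a streaming session. The returned portalArn is what the associate and disassociate operations in this model take as their portalArn path parameter.

```ts
import {
  WorkSpacesWebClient,
  CreatePortalCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

export async function createPortal() {
  const { portalArn, portalEndpoint } = await client.send(
    new CreatePortalCommand({
      displayName: "internal-apps", // not shown to users who log in
      // customerManagedKey and additionalEncryptionContext are optional.
    })
  );
  return { portalArn, portalEndpoint };
}
```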

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateTrustStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#CreateTrustStoreRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#CreateTrustStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Creates a trust store that can be associated with a web portal. A trust store contains\n certificate authority (CA) certificates. Once associated with a web portal, the browser in\n a streaming session will recognize certificates that have been issued using any of the CAs\n in the trust store. If your organization has internal websites that use certificates issued\n by private CAs, you should add the private CA certificate to the trust store.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/trustStores", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#CreateTrustStoreRequest": { + "type": "structure", + "members": { + "certificateList": { + "target": "com.amazonaws.workspacesweb#CertificateList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of CA certificates to be added to the trust store.

                                                                      ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.workspacesweb#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      The tags to add to the trust store. A tag is a key-value pair.

                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      \n

                                                                      If you do not specify a client token, one is automatically generated by the AWS\n SDK.

                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateTrustStoreResponse": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store.
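A sketch of building a trust store from a private CA certificate on disk, assuming the generated client and that each certificateList entry is passed as raw certificate bytes; the file path is a placeholder.

```ts
import { readFile } from "node:fs/promises";
import {
  WorkSpacesWebClient,
  CreateTrustStoreCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

export async function createTrustStore(caCertPath: string): Promise<string | undefined> {
  // Each certificateList entry is the bytes of one CA certificate.
  const caCert = await readFile(caCertPath);

  const { trustStoreArn } = await client.send(
    new CreateTrustStoreCommand({ certificateList: [caCert] })
  );
  return trustStoreArn;
}
```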

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateUserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#CreateUserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#CreateUserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

Creates a user settings resource that can be associated with a web portal. Once associated with a web portal, user settings control how users can transfer data between a streaming session and their local devices.

                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/userSettings", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#CreateUserSettingsRequest": { + "type": "structure", + "members": { + "copyAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies whether the user can copy text from the streaming session to the local\n device.

                                                                      ", + "smithy.api#required": {} + } + }, + "pasteAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies whether the user can paste text from the local device to the streaming\n session.

                                                                      ", + "smithy.api#required": {} + } + }, + "downloadAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies whether the user can download files from the streaming session to the local\n device.

                                                                      ", + "smithy.api#required": {} + } + }, + "uploadAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies whether the user can upload files from the local device to the streaming\n session.

                                                                      ", + "smithy.api#required": {} + } + }, + "printAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "

                                                                      Specifies whether the user can print to the local device.

                                                                      ", + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.workspacesweb#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      The tags to add to the user settings resource. A tag is a key-value pair.

                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "

A unique, case-sensitive identifier that you provide to ensure the idempotency of the request. Idempotency ensures that an API request completes only once. With an idempotent request, if the original request completes successfully, subsequent retries with the same client token return the result from the original successful request.

                                                                      \n

                                                                      If you do not specify a client token, one is automatically generated by the AWS\n SDK.

                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#CreateUserSettingsResponse": { + "type": "structure", + "members": { + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the user settings.
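A sketch that locks down data transfer except clipboard copy, assuming the generated client; the "Enabled" and "Disabled" strings come from the EnabledType enum defined later in this model.

```ts
import {
  WorkSpacesWebClient,
  CreateUserSettingsCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

export async function createUserSettings(): Promise<string | undefined> {
  const { userSettingsArn } = await client.send(
    new CreateUserSettingsCommand({
      copyAllowed: "Enabled",     // session -> local device
      pasteAllowed: "Disabled",   // local device -> session
      downloadAllowed: "Disabled",
      uploadAllowed: "Disabled",
      printAllowed: "Disabled",
    })
  );
  return userSettingsArn;
}
```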

                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DeleteBrowserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DeleteBrowserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DeleteBrowserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes browser settings.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/browserSettings/{browserSettingsArn+}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DeleteBrowserSettingsRequest": { + "type": "structure", + "members": { + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the browser settings.
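Since the delete operations in this model bind the ARN into the request path ({browserSettingsArn+}) and are marked smithy.api#idempotent, a delete call looks like the following sketch, under the same client-package assumption as above.

```ts
import {
  WorkSpacesWebClient,
  DeleteBrowserSettingsCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

// The ARN is serialized into the URI path; retrying the delete is safe
// because the operation is idempotent.
export async function deleteBrowserSettings(browserSettingsArn: string): Promise<void> {
  await client.send(new DeleteBrowserSettingsCommand({ browserSettingsArn }));
}
```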

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DeleteBrowserSettingsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DeleteIdentityProvider": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DeleteIdentityProviderRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DeleteIdentityProviderResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes the identity provider.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/identityProviders/{identityProviderArn+}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DeleteIdentityProviderRequest": { + "type": "structure", + "members": { + "identityProviderArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the identity provider.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DeleteIdentityProviderResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DeleteNetworkSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DeleteNetworkSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DeleteNetworkSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes network settings.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/networkSettings/{networkSettingsArn+}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DeleteNetworkSettingsRequest": { + "type": "structure", + "members": { + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the network settings.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DeleteNetworkSettingsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DeletePortal": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DeletePortalRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DeletePortalResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes a web portal.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/portals/{portalArn+}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DeletePortalRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DeletePortalResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DeleteTrustStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DeleteTrustStoreRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DeleteTrustStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes the trust store.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/trustStores/{trustStoreArn+}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DeleteTrustStoreRequest": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DeleteTrustStoreResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DeleteUserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DeleteUserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DeleteUserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#ConflictException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Deletes user settings.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/userSettings/{userSettingsArn+}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DeleteUserSettingsRequest": { + "type": "structure", + "members": { + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the user settings.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DeleteUserSettingsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DisassociateBrowserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DisassociateBrowserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DisassociateBrowserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Disassociates browser settings from a web portal.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/portals/{portalArn+}/browserSettings", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DisassociateBrowserSettingsRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.
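Note that disassociation is addressed by the portal, not by the settings resource (DELETE /portals/{portalArn+}/browserSettings); a sketch under the same assumptions:

```ts
import {
  WorkSpacesWebClient,
  DisassociateBrowserSettingsCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

// Detaches whatever browser settings are currently associated with the portal.
export async function detachBrowserSettings(portalArn: string): Promise<void> {
  await client.send(new DisassociateBrowserSettingsCommand({ portalArn }));
}
```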

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DisassociateBrowserSettingsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DisassociateNetworkSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DisassociateNetworkSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DisassociateNetworkSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Disassociates network settings from a web portal.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/portals/{portalArn+}/networkSettings", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DisassociateNetworkSettingsRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DisassociateNetworkSettingsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DisassociateTrustStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DisassociateTrustStoreRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DisassociateTrustStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Disassociates a trust store from a web portal.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/portals/{portalArn+}/trustStores", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DisassociateTrustStoreRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DisassociateTrustStoreResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DisassociateUserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#DisassociateUserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#DisassociateUserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Disassociates user settings from a web portal.

                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/portals/{portalArn+}/userSettings", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#DisassociateUserSettingsRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#DisassociateUserSettingsResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#DisplayName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 64 + }, + "smithy.api#pattern": "^.+$" + } + }, + "com.amazonaws.workspacesweb#EnabledType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Disabled", + "name": "DISABLED" + }, + { + "value": "Enabled", + "name": "ENABLED" + } + ] + } + }, + "com.amazonaws.workspacesweb#EncryptionContextMap": { + "type": "map", + "key": { + "target": "com.amazonaws.workspacesweb#StringType" + }, + "value": { + "target": "com.amazonaws.workspacesweb#StringType" + } + }, + "com.amazonaws.workspacesweb#ExceptionMessage": { + "type": "string" + }, + "com.amazonaws.workspacesweb#FieldName": { + "type": "string" + }, + "com.amazonaws.workspacesweb#GetBrowserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetBrowserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetBrowserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets browser settings.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/browserSettings/{browserSettingsArn+}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetBrowserSettingsRequest": { + "type": "structure", + "members": { + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the browser settings.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#GetBrowserSettingsResponse": { + "type": "structure", + "members": { + "browserSettings": { + "target": "com.amazonaws.workspacesweb#BrowserSettings", + "traits": { + "smithy.api#documentation": "

                                                                      The browser settings.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#GetIdentityProvider": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetIdentityProviderRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetIdentityProviderResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the identity provider.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/identityProviders/{identityProviderArn+}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetIdentityProviderRequest": { + "type": "structure", + "members": { + "identityProviderArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the identity provider.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#GetIdentityProviderResponse": { + "type": "structure", + "members": { + "identityProvider": { + "target": "com.amazonaws.workspacesweb#IdentityProvider", + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#GetNetworkSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetNetworkSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetNetworkSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the network settings.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/networkSettings/{networkSettingsArn+}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetNetworkSettingsRequest": { + "type": "structure", + "members": { + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the network settings.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#GetNetworkSettingsResponse": { + "type": "structure", + "members": { + "networkSettings": { + "target": "com.amazonaws.workspacesweb#NetworkSettings", + "traits": { + "smithy.api#documentation": "

                                                                      The network settings.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#GetPortal": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetPortalRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetPortalResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the web portal.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/portals/{portalArn+}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetPortalRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#GetPortalResponse": { + "type": "structure", + "members": { + "portal": { + "target": "com.amazonaws.workspacesweb#Portal", + "traits": { + "smithy.api#documentation": "

                                                                      The web portal.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#GetPortalServiceProviderMetadata": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetPortalServiceProviderMetadataRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetPortalServiceProviderMetadataResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the service provider metadata.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/portalIdp/{portalArn+}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetPortalServiceProviderMetadataRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#GetPortalServiceProviderMetadataResponse": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#required": {} + } + }, + "serviceProviderSamlMetadata": { + "target": "com.amazonaws.workspacesweb#SamlMetadata", + "traits": { + "smithy.api#documentation": "

                                                                      The service provider SAML metadata.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#GetTrustStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetTrustStoreRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetTrustStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the trust store.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/trustStores/{trustStoreArn+}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetTrustStoreCertificate": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetTrustStoreCertificateRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetTrustStoreCertificateResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets the trust store certificate.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/trustStores/{trustStoreArn+}/certificate", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetTrustStoreCertificateRequest": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store certificate.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "thumbprint": { + "target": "com.amazonaws.workspacesweb#CertificateThumbprint", + "traits": { + "smithy.api#documentation": "

                                                                      The thumbprint of the trust store certificate.
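Per the traits around this member, trustStoreArn is bound to the URI path while thumbprint travels as a query-string parameter. A small sketch only, assuming the generated GetTrustStoreCertificateCommand in @aws-sdk/client-workspaces-web; the ARN and thumbprint values are placeholders:

// Sketch only: assumes GetTrustStoreCertificateCommand from @aws-sdk/client-workspaces-web;
// the trust store ARN and thumbprint below are placeholders.
import {
  WorkSpacesWebClient,
  GetTrustStoreCertificateCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

async function getCertificate() {
  // trustStoreArn is serialized into the request path; thumbprint becomes the
  // ?thumbprint= query parameter.
  const { certificate } = await client.send(
    new GetTrustStoreCertificateCommand({
      trustStoreArn: "arn:aws:workspaces-web:us-west-2:111122223333:trustStore/example",
      thumbprint: "0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef",
    })
  );
  return certificate;
}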

                                                                      ", + "smithy.api#httpQuery": "thumbprint", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#GetTrustStoreCertificateResponse": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store certificate.

                                                                      " + } + }, + "certificate": { + "target": "com.amazonaws.workspacesweb#Certificate", + "traits": { + "smithy.api#documentation": "

                                                                      The certificate of the trust store certificate.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#GetTrustStoreRequest": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#GetTrustStoreResponse": { + "type": "structure", + "members": { + "trustStore": { + "target": "com.amazonaws.workspacesweb#TrustStore", + "traits": { + "smithy.api#documentation": "

                                                                      The trust store.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#GetUserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#GetUserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#GetUserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Gets user settings.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/userSettings/{userSettingsArn+}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#GetUserSettingsRequest": { + "type": "structure", + "members": { + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the user settings.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#GetUserSettingsResponse": { + "type": "structure", + "members": { + "userSettings": { + "target": "com.amazonaws.workspacesweb#UserSettings", + "traits": { + "smithy.api#documentation": "

                                                                      The user settings.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#IdentityProvider": { + "type": "structure", + "members": { + "identityProviderArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the identity provider.

                                                                      ", + "smithy.api#required": {} + } + }, + "identityProviderName": { + "target": "com.amazonaws.workspacesweb#IdentityProviderName", + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider name.

                                                                      " + } + }, + "identityProviderType": { + "target": "com.amazonaws.workspacesweb#IdentityProviderType", + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider type.

                                                                      " + } + }, + "identityProviderDetails": { + "target": "com.amazonaws.workspacesweb#IdentityProviderDetails", + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider details. The following list describes the provider detail keys for each identity provider type (a sketch using these keys follows this list):
                                                                      • For Google and Login with Amazon: client_id, client_secret, authorize_scopes
                                                                      • For Facebook: client_id, client_secret, authorize_scopes, api_version
                                                                      • For Sign in with Apple: client_id, team_id, key_id, private_key, authorize_scopes
                                                                      • For OIDC providers: client_id, client_secret, attributes_request_method, oidc_issuer, authorize_scopes, plus authorize_url, token_url, attributes_url, and jwks_uri if they are not available from the discovery URL specified by the oidc_issuer key
                                                                      • For SAML providers: MetadataFile OR MetadataURL, and IDPSignout (optional)
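These keys are carried in the identityProviderDetails member, a plain string-to-string map (the IdentityProviderDetails shape defined below). As a minimal sketch, assuming the generated @aws-sdk/client-workspaces-web package exposes a CreateIdentityProviderCommand that accepts this map (the operation itself is defined elsewhere in this model), an OIDC provider could be registered as follows; every ARN, URL, and secret is a placeholder:

// Sketch only, not the generated code: assumes CreateIdentityProviderCommand
// from @aws-sdk/client-workspaces-web; all values below are placeholders.
import {
  WorkSpacesWebClient,
  CreateIdentityProviderCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

// identityProviderDetails is a plain string-to-string map; which keys are
// required depends on identityProviderType, as listed above.
const oidcDetails: Record<string, string> = {
  client_id: "example-client-id",
  client_secret: "example-client-secret",
  attributes_request_method: "GET",
  oidc_issuer: "https://idp.example.com",
  authorize_scopes: "openid profile email",
};

async function registerOidcProvider(portalArn: string) {
  return client.send(
    new CreateIdentityProviderCommand({
      portalArn,
      identityProviderName: "example-oidc",
      identityProviderType: "OIDC",
      identityProviderDetails: oidcDetails,
    })
  );
}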
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider.

                                                                      " + } + }, + "com.amazonaws.workspacesweb#IdentityProviderDetails": { + "type": "map", + "key": { + "target": "com.amazonaws.workspacesweb#StringType" + }, + "value": { + "target": "com.amazonaws.workspacesweb#StringType" + } + }, + "com.amazonaws.workspacesweb#IdentityProviderList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#IdentityProviderSummary" + } + }, + "com.amazonaws.workspacesweb#IdentityProviderName": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32 + }, + "smithy.api#pattern": "^[^_][\\p{L}\\p{M}\\p{S}\\p{N}\\p{P}][^_]+$" + } + }, + "com.amazonaws.workspacesweb#IdentityProviderSummary": { + "type": "structure", + "members": { + "identityProviderArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the identity provider.

                                                                      " + } + }, + "identityProviderName": { + "target": "com.amazonaws.workspacesweb#IdentityProviderName", + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider name.

                                                                      " + } + }, + "identityProviderType": { + "target": "com.amazonaws.workspacesweb#IdentityProviderType", + "traits": { + "smithy.api#documentation": "

                                                                      The identity provider type.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The summary of the identity provider.

                                                                      " + } + }, + "com.amazonaws.workspacesweb#IdentityProviderType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "SAML", + "name": "SAML" + }, + { + "value": "Facebook", + "name": "Facebook" + }, + { + "value": "Google", + "name": "Google" + }, + { + "value": "LoginWithAmazon", + "name": "LoginWithAmazon" + }, + { + "value": "SignInWithApple", + "name": "SignInWithApple" + }, + { + "value": "OIDC", + "name": "OIDC" + } + ] + } + }, + "com.amazonaws.workspacesweb#InternalServerException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspacesweb#ExceptionMessage" + }, + "retryAfterSeconds": { + "target": "com.amazonaws.workspacesweb#RetryAfterSeconds", + "traits": { + "smithy.api#documentation": "

                                                                      Advice to clients on when the call can be safely retried.

                                                                      ", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      There is an internal server error.

                                                                      ", + "smithy.api#error": "server", + "smithy.api#httpError": 500 + } + }, + "com.amazonaws.workspacesweb#ListBrowserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListBrowserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListBrowserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves a list of browser settings.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/browserSettings", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListBrowserSettingsRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.workspacesweb#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to be included in the next page.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + } + } + }, + "com.amazonaws.workspacesweb#ListBrowserSettingsResponse": { + "type": "structure", + "members": { + "browserSettings": { + "target": "com.amazonaws.workspacesweb#BrowserSettingsList", + "traits": { + "smithy.api#documentation": "

                                                                      The browser settings.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.
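All of the List* operations in this model share this nextToken/maxResults contract (see the smithy.api#paginated trait above). A rough sketch of threading the token by hand, assuming the generated ListBrowserSettingsCommand from @aws-sdk/client-workspaces-web; the generated paginateListBrowserSettings helper, if present, wraps the same loop:

// Sketch only: assumes ListBrowserSettingsCommand from @aws-sdk/client-workspaces-web
// with the { nextToken, maxResults } input shown in this model.
import {
  WorkSpacesWebClient,
  ListBrowserSettingsCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

async function listAllBrowserSettings() {
  const settings: unknown[] = [];
  let nextToken: string | undefined;
  do {
    // maxResults caps the page size; nextToken resumes where the previous page ended.
    const page = await client.send(
      new ListBrowserSettingsCommand({ maxResults: 25, nextToken })
    );
    settings.push(...(page.browserSettings ?? []));
    nextToken = page.nextToken;
  } while (nextToken);
  return settings;
}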

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#ListIdentityProviders": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListIdentityProvidersRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListIdentityProvidersResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves a list of identity providers for a specific web portal.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/portals/{portalArn+}/identityProviders", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListIdentityProvidersRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.workspacesweb#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to be included in the next page.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + }, + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#ListIdentityProvidersResponse": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      " + } + }, + "identityProviders": { + "target": "com.amazonaws.workspacesweb#IdentityProviderList", + "traits": { + "smithy.api#documentation": "

                                                                      The identity providers.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#ListNetworkSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListNetworkSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListNetworkSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves a list of network settings.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/networkSettings", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListNetworkSettingsRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.workspacesweb#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to be included in the next page.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + } + } + }, + "com.amazonaws.workspacesweb#ListNetworkSettingsResponse": { + "type": "structure", + "members": { + "networkSettings": { + "target": "com.amazonaws.workspacesweb#NetworkSettingsList", + "traits": { + "smithy.api#documentation": "

                                                                      The network settings.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#ListPortals": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListPortalsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListPortalsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves a list of web portals.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/portals", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListPortalsRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.workspacesweb#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to be included in the next page.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + } + } + }, + "com.amazonaws.workspacesweb#ListPortalsResponse": { + "type": "structure", + "members": { + "portals": { + "target": "com.amazonaws.workspacesweb#PortalList", + "traits": { + "smithy.api#documentation": "

                                                                      The portals in the list.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#ListTagsForResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListTagsForResourceRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListTagsForResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves a list of tags for a resource.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/tags/{resourceArn+}", + "code": 200 + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListTagsForResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the resource.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#ListTagsForResourceResponse": { + "type": "structure", + "members": { + "tags": { + "target": "com.amazonaws.workspacesweb#TagList", + "traits": { + "smithy.api#documentation": "

                                                                      The tags of the resource.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#ListTrustStoreCertificates": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListTrustStoreCertificatesRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListTrustStoreCertificatesResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves a list of trust store certificates.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/trustStores/{trustStoreArn+}/certificates", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListTrustStoreCertificatesRequest": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store.

                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.workspacesweb#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to be included in the next page.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + } + } + }, + "com.amazonaws.workspacesweb#ListTrustStoreCertificatesResponse": { + "type": "structure", + "members": { + "certificateList": { + "target": "com.amazonaws.workspacesweb#CertificateSummaryList", + "traits": { + "smithy.api#documentation": "

                                                                      The certificate list.

                                                                      " + } + }, + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#ListTrustStores": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListTrustStoresRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListTrustStoresResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves a list of trust stores.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/trustStores", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListTrustStoresRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.workspacesweb#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to be included in the next page.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + } + } + }, + "com.amazonaws.workspacesweb#ListTrustStoresResponse": { + "type": "structure", + "members": { + "trustStores": { + "target": "com.amazonaws.workspacesweb#TrustStoreSummaryList", + "traits": { + "smithy.api#documentation": "

                                                                      The trust stores.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#ListUserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#ListUserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#ListUserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "

                                                                      Retrieves a list of user settings.

                                                                      ", + "smithy.api#http": { + "method": "GET", + "uri": "/userSettings", + "code": 200 + }, + "smithy.api#paginated": { + "inputToken": "nextToken", + "outputToken": "nextToken", + "pageSize": "maxResults" + }, + "smithy.api#readonly": {} + } + }, + "com.amazonaws.workspacesweb#ListUserSettingsRequest": { + "type": "structure", + "members": { + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      ", + "smithy.api#httpQuery": "nextToken" + } + }, + "maxResults": { + "target": "com.amazonaws.workspacesweb#MaxResults", + "traits": { + "smithy.api#documentation": "

                                                                      The maximum number of results to be included in the next page.

                                                                      ", + "smithy.api#httpQuery": "maxResults" + } + } + } + }, + "com.amazonaws.workspacesweb#ListUserSettingsResponse": { + "type": "structure", + "members": { + "userSettings": { + "target": "com.amazonaws.workspacesweb#UserSettingsList", + "traits": { + "smithy.api#documentation": "

                                                                      The user settings.

                                                                      " + } + }, + "nextToken": { + "target": "com.amazonaws.workspacesweb#PaginationToken", + "traits": { + "smithy.api#documentation": "

                                                                      The pagination token used to retrieve the next page of results for this operation.

                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#MaxResults": { + "type": "integer", + "traits": { + "smithy.api#box": {}, + "smithy.api#range": { + "min": 1 + } + } + }, + "com.amazonaws.workspacesweb#NetworkSettings": { + "type": "structure", + "members": { + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the network settings.

                                                                      ", + "smithy.api#required": {} + } + }, + "associatedPortalArns": { + "target": "com.amazonaws.workspacesweb#ArnList", + "traits": { + "smithy.api#documentation": "

                                                                      A list of web portal ARNs that this network settings resource is associated with.

                                                                      " + } + }, + "vpcId": { + "target": "com.amazonaws.workspacesweb#VpcId", + "traits": { + "smithy.api#documentation": "

                                                                      The VPC that streaming instances will connect to.

                                                                      " + } + }, + "subnetIds": { + "target": "com.amazonaws.workspacesweb#SubnetIdList", + "traits": { + "smithy.api#documentation": "

                                                                      The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different Availability Zones.

                                                                      " + } + }, + "securityGroupIds": { + "target": "com.amazonaws.workspacesweb#SecurityGroupIdList", + "traits": { + "smithy.api#documentation": "

                                                                      One or more security groups used to control access from streaming instances to your VPC.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      A network settings resource that can be associated with a web portal. Once associated with a web portal, network settings define how streaming instances will connect with your specified VPC.
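In the generated TypeScript client these members surface directly on the network settings shapes. A minimal sketch of creating such a resource, assuming a CreateNetworkSettingsCommand exists in @aws-sdk/client-workspaces-web (that operation is defined elsewhere in this model); all IDs below are placeholders:

// Sketch only: CreateNetworkSettingsCommand is defined elsewhere in this model;
// the VPC, subnet, and security group IDs are placeholders.
import {
  WorkSpacesWebClient,
  CreateNetworkSettingsCommand,
} from "@aws-sdk/client-workspaces-web";

const client = new WorkSpacesWebClient({ region: "us-west-2" });

async function createNetworkSettings() {
  return client.send(
    new CreateNetworkSettingsCommand({
      vpcId: "vpc-0123456789abcdef0",
      // At least two subnets, in different Availability Zones.
      subnetIds: ["subnet-0123456789abcdef0", "subnet-0fedcba9876543210"],
      // One to five security groups controlling access from streaming instances to the VPC.
      securityGroupIds: ["sg-0123456789abcdef0"],
    })
  );
}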

                                                                      " + } + }, + "com.amazonaws.workspacesweb#NetworkSettingsList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#NetworkSettingsSummary" + } + }, + "com.amazonaws.workspacesweb#NetworkSettingsSummary": { + "type": "structure", + "members": { + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the network settings.

                                                                      " + } + }, + "vpcId": { + "target": "com.amazonaws.workspacesweb#VpcId", + "traits": { + "smithy.api#documentation": "

                                                                      The VPC ID of the network settings.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The summary of network settings.

                                                                      " + } + }, + "com.amazonaws.workspacesweb#PaginationToken": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 2048 + }, + "smithy.api#pattern": "^\\S+$" + } + }, + "com.amazonaws.workspacesweb#Portal": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      " + } + }, + "rendererType": { + "target": "com.amazonaws.workspacesweb#RendererType", + "traits": { + "smithy.api#documentation": "

                                                                      The renderer that is used in streaming sessions.

                                                                      " + } + }, + "browserType": { + "target": "com.amazonaws.workspacesweb#BrowserType", + "traits": { + "smithy.api#documentation": "

                                                                      The browser that users see when using a streaming session.

                                                                      " + } + }, + "portalStatus": { + "target": "com.amazonaws.workspacesweb#PortalStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the web portal.

                                                                      " + } + }, + "portalEndpoint": { + "target": "com.amazonaws.workspacesweb#PortalEndpoint", + "traits": { + "smithy.api#documentation": "

                                                                      The endpoint URL of the web portal that users access in order to start streaming sessions.

                                                                      " + } + }, + "displayName": { + "target": "com.amazonaws.workspacesweb#DisplayName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the web portal.

                                                                      " + } + }, + "creationDate": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The creation date of the web portal.

                                                                      " + } + }, + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the browser settings that is associated with this web portal.

                                                                      " + } + }, + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the user settings that is associated with the web portal.

                                                                      " + } + }, + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the network settings that is associated with the web portal.

                                                                      " + } + }, + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store that is associated with the web portal.

                                                                      " + } + }, + "statusReason": { + "target": "com.amazonaws.workspacesweb#StatusReason", + "traits": { + "smithy.api#documentation": "

                                                                      A message that explains why the web portal is in its current status.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "

                                                                      The web portal.

                                                                      " + } + }, + "com.amazonaws.workspacesweb#PortalEndpoint": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 253 + }, + "smithy.api#pattern": "^[a-zA-Z0-9]?((?!-)([A-Za-z0-9-]*[A-Za-z0-9])\\.)+[a-zA-Z0-9]+$" + } + }, + "com.amazonaws.workspacesweb#PortalList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#PortalSummary" + } + }, + "com.amazonaws.workspacesweb#PortalStatus": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "Incomplete", + "name": "INCOMPLETE" + }, + { + "value": "Pending", + "name": "PENDING" + }, + { + "value": "Active", + "name": "ACTIVE" + } + ] + } + }, + "com.amazonaws.workspacesweb#PortalSummary": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the web portal.

                                                                      " + } + }, + "rendererType": { + "target": "com.amazonaws.workspacesweb#RendererType", + "traits": { + "smithy.api#documentation": "

                                                                      The renderer that is used in streaming sessions.

                                                                      " + } + }, + "browserType": { + "target": "com.amazonaws.workspacesweb#BrowserType", + "traits": { + "smithy.api#documentation": "

                                                                      The browser type of the web portal.

                                                                      " + } + }, + "portalStatus": { + "target": "com.amazonaws.workspacesweb#PortalStatus", + "traits": { + "smithy.api#documentation": "

                                                                      The status of the web portal.

                                                                      " + } + }, + "portalEndpoint": { + "target": "com.amazonaws.workspacesweb#PortalEndpoint", + "traits": { + "smithy.api#documentation": "

                                                                      The endpoint URL of the web portal that users access in order to start streaming sessions.

                                                                      " + } + }, + "displayName": { + "target": "com.amazonaws.workspacesweb#DisplayName", + "traits": { + "smithy.api#documentation": "

                                                                      The name of the web portal.

                                                                      " + } + }, + "creationDate": { + "target": "com.amazonaws.workspacesweb#Timestamp", + "traits": { + "smithy.api#documentation": "

                                                                      The creation date of the web portal.

                                                                      " + } + }, + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the browser settings that is associated with the web portal.

                                                                      " + } + }, + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the user settings that is associated with the web portal.

                                                                      " + } + }, + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the network settings that is associated with the web portal.

                                                                      " + } + }, + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "

                                                                      The ARN of the trust store that is associated with this web portal.

                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
The summary of the portal.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#QuotaCode": { + "type": "string" + }, + "com.amazonaws.workspacesweb#RendererType": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "AppStream", + "name": "APPSTREAM" + } + ] + } + }, + "com.amazonaws.workspacesweb#ResourceId": { + "type": "string" + }, + "com.amazonaws.workspacesweb#ResourceNotFoundException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspacesweb#ExceptionMessage" + }, + "resourceId": { + "target": "com.amazonaws.workspacesweb#ResourceId", + "traits": { + "smithy.api#documentation": "
Hypothetical identifier of the resource affected.
                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.workspacesweb#ResourceType", + "traits": { + "smithy.api#documentation": "
Hypothetical type of the resource affected.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
The resource cannot be found.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 404 + } + }, + "com.amazonaws.workspacesweb#ResourceType": { + "type": "string" + }, + "com.amazonaws.workspacesweb#RetryAfterSeconds": { + "type": "integer" + }, + "com.amazonaws.workspacesweb#SamlMetadata": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 204800 + }, + "smithy.api#pattern": "^.+$" + } + }, + "com.amazonaws.workspacesweb#SecurityGroupId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^[\\w+\\-]+$" + } + }, + "com.amazonaws.workspacesweb#SecurityGroupIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#SecurityGroupId" + }, + "traits": { + "smithy.api#length": { + "min": 1, + "max": 5 + } + } + }, + "com.amazonaws.workspacesweb#ServiceCode": { + "type": "string" + }, + "com.amazonaws.workspacesweb#ServiceQuotaExceededException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspacesweb#ExceptionMessage" + }, + "resourceId": { + "target": "com.amazonaws.workspacesweb#ResourceId", + "traits": { + "smithy.api#documentation": "
Identifier of the resource affected.
                                                                      " + } + }, + "resourceType": { + "target": "com.amazonaws.workspacesweb#ResourceType", + "traits": { + "smithy.api#documentation": "
Type of the resource affected.
                                                                      " + } + }, + "serviceCode": { + "target": "com.amazonaws.workspacesweb#ServiceCode", + "traits": { + "smithy.api#documentation": "
The originating service.
                                                                      " + } + }, + "quotaCode": { + "target": "com.amazonaws.workspacesweb#QuotaCode", + "traits": { + "smithy.api#documentation": "
The originating quota.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
The service quota has been exceeded.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 402 + } + }, + "com.amazonaws.workspacesweb#StatusReason": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 1024 + }, + "smithy.api#pattern": ".*" + } + }, + "com.amazonaws.workspacesweb#StringType": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 131072 + }, + "smithy.api#pattern": "^[\\s\\S]*$" + } + }, + "com.amazonaws.workspacesweb#SubnetId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 32 + }, + "smithy.api#pattern": "^subnet-([0-9a-f]{8}|[0-9a-f]{17})$" + } + }, + "com.amazonaws.workspacesweb#SubnetIdList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#SubnetId" + }, + "traits": { + "smithy.api#length": { + "min": 2, + "max": 3 + } + } + }, + "com.amazonaws.workspacesweb#Tag": { + "type": "structure", + "members": { + "Key": { + "target": "com.amazonaws.workspacesweb#TagKey", + "traits": { + "smithy.api#documentation": "
The key of the tag.
                                                                      ", + "smithy.api#required": {} + } + }, + "Value": { + "target": "com.amazonaws.workspacesweb#TagValue", + "traits": { + "smithy.api#documentation": "
The value of the tag.
                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "
The tag.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#TagExceptionMessage": { + "type": "string" + }, + "com.amazonaws.workspacesweb#TagKey": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 128 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, + "com.amazonaws.workspacesweb#TagKeyList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#TagKey" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.workspacesweb#TagList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#Tag" + }, + "traits": { + "smithy.api#length": { + "min": 0, + "max": 200 + } + } + }, + "com.amazonaws.workspacesweb#TagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#TagResourceRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#TagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#TooManyTagsException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Adds or overwrites one or more tags for the specified resource.
                                                                      ", + "smithy.api#http": { + "method": "POST", + "uri": "/tags/{resourceArn+}", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#TagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the resource.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tags": { + "target": "com.amazonaws.workspacesweb#TagList", + "traits": { + "smithy.api#documentation": "
The tags of the resource.
                                                                      ", + "smithy.api#required": {} + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "
A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. Idempotency ensures that an API request completes only once. With an idempotent\n request, if the original request completes successfully, subsequent retries with the same\n client token return the result from the original successful request.
\n
If you do not specify a client token, one is automatically generated by the AWS\n SDK.
                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#TagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#TagValue": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 0, + "max": 256 + }, + "smithy.api#pattern": "^([\\p{L}\\p{Z}\\p{N}_.:/=+\\-@]*)$" + } + }, + "com.amazonaws.workspacesweb#ThrottlingException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspacesweb#ExceptionMessage" + }, + "serviceCode": { + "target": "com.amazonaws.workspacesweb#ServiceCode", + "traits": { + "smithy.api#documentation": "
The originating service.
                                                                      " + } + }, + "quotaCode": { + "target": "com.amazonaws.workspacesweb#QuotaCode", + "traits": { + "smithy.api#documentation": "
The originating quota.
                                                                      " + } + }, + "retryAfterSeconds": { + "target": "com.amazonaws.workspacesweb#RetryAfterSeconds", + "traits": { + "smithy.api#documentation": "
Advice to clients on when the call can be safely retried.
                                                                      ", + "smithy.api#httpHeader": "Retry-After" + } + } + }, + "traits": { + "smithy.api#documentation": "
There is a throttling error.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 429 + } + }, + "com.amazonaws.workspacesweb#Timestamp": { + "type": "timestamp" + }, + "com.amazonaws.workspacesweb#TooManyTagsException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspacesweb#TagExceptionMessage" + }, + "resourceName": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
Name of the resource affected.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
There are too many tags.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.workspacesweb#TrustStore": { + "type": "structure", + "members": { + "associatedPortalArns": { + "target": "com.amazonaws.workspacesweb#ArnList", + "traits": { + "smithy.api#documentation": "
A list of web portal ARNs that this trust store is associated with.
                                                                      " + } + }, + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the trust store.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
A trust store that can be associated with a web portal. A trust store contains\n certificate authority (CA) certificates. Once associated with a web portal, the browser in\n a streaming session will recognize certificates that have been issued using any of the CAs\n in the trust store. If your organization has internal websites that use certificates issued\n by private CAs, you should add the private CA certificate to the trust store.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#TrustStoreSummary": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the trust store.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
The summary of the trust store.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#TrustStoreSummaryList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#TrustStoreSummary" + } + }, + "com.amazonaws.workspacesweb#UntagResource": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#UntagResourceRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#UntagResourceResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Removes one or more tags from the specified resource.
                                                                      ", + "smithy.api#http": { + "method": "DELETE", + "uri": "/tags/{resourceArn+}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#UntagResourceRequest": { + "type": "structure", + "members": { + "resourceArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the resource.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "tagKeys": { + "target": "com.amazonaws.workspacesweb#TagKeyList", + "traits": { + "smithy.api#documentation": "
The list of tag keys to remove from the resource.
                                                                      ", + "smithy.api#httpQuery": "tagKeys", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UntagResourceResponse": { + "type": "structure", + "members": {} + }, + "com.amazonaws.workspacesweb#UpdateBrowserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#UpdateBrowserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#UpdateBrowserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Updates browser settings.
                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/browserSettings/{browserSettingsArn+}", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#UpdateBrowserSettingsRequest": { + "type": "structure", + "members": { + "browserSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the browser settings.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "browserPolicy": { + "target": "com.amazonaws.workspacesweb#BrowserPolicy", + "traits": { + "smithy.api#documentation": "
A JSON string containing Chrome Enterprise policies that will be applied to all\n streaming sessions.
                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "
A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. Idempotency ensures that an API request completes only once. With an idempotent\n request, if the original request completes successfully, subsequent retries with the same\n client token return the result from the original successful request.
\n
If you do not specify a client token, one is automatically generated by the AWS\n SDK.
                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateBrowserSettingsResponse": { + "type": "structure", + "members": { + "browserSettings": { + "target": "com.amazonaws.workspacesweb#BrowserSettings", + "traits": { + "smithy.api#documentation": "
The browser settings.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateIdentityProvider": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#UpdateIdentityProviderRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#UpdateIdentityProviderResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Updates the identity provider.
                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/identityProviders/{identityProviderArn+}", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#UpdateIdentityProviderRequest": { + "type": "structure", + "members": { + "identityProviderArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the identity provider.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "identityProviderName": { + "target": "com.amazonaws.workspacesweb#IdentityProviderName", + "traits": { + "smithy.api#documentation": "
The name of the identity provider.
                                                                      " + } + }, + "identityProviderType": { + "target": "com.amazonaws.workspacesweb#IdentityProviderType", + "traits": { + "smithy.api#documentation": "
The type of the identity provider.
                                                                      " + } + }, + "identityProviderDetails": { + "target": "com.amazonaws.workspacesweb#IdentityProviderDetails", + "traits": { + "smithy.api#documentation": "
The details of the identity provider.
                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "
A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. Idempotency ensures that an API request completes only once. With an idempotent\n request, if the original request completes successfully, subsequent retries with the same\n client token return the result from the original successful request.
\n
If you do not specify a client token, one is automatically generated by the AWS\n SDK.
                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateIdentityProviderResponse": { + "type": "structure", + "members": { + "identityProvider": { + "target": "com.amazonaws.workspacesweb#IdentityProvider", + "traits": { + "smithy.api#documentation": "
The identity provider.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateNetworkSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#UpdateNetworkSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#UpdateNetworkSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Updates network settings.
                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/networkSettings/{networkSettingsArn+}", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#UpdateNetworkSettingsRequest": { + "type": "structure", + "members": { + "networkSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the network settings.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "vpcId": { + "target": "com.amazonaws.workspacesweb#VpcId", + "traits": { + "smithy.api#documentation": "
The VPC that streaming instances will connect to.
                                                                      " + } + }, + "subnetIds": { + "target": "com.amazonaws.workspacesweb#SubnetIdList", + "traits": { + "smithy.api#documentation": "
The subnets in which network interfaces are created to connect streaming instances to your VPC. At least two of these subnets must be in different availability zones.
                                                                      " + } + }, + "securityGroupIds": { + "target": "com.amazonaws.workspacesweb#SecurityGroupIdList", + "traits": { + "smithy.api#documentation": "
One or more security groups used to control access from streaming instances to your VPC.
                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "
A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. Idempotency ensures that an API request completes only once. With an idempotent\n request, if the original request completes successfully, subsequent retries with the same\n client token return the result from the original successful request.
\n
If you do not specify a client token, one is automatically generated by the AWS\n SDK.
                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateNetworkSettingsResponse": { + "type": "structure", + "members": { + "networkSettings": { + "target": "com.amazonaws.workspacesweb#NetworkSettings", + "traits": { + "smithy.api#documentation": "
The network settings.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdatePortal": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#UpdatePortalRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#UpdatePortalResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Updates a web portal.
                                                                      ", + "smithy.api#http": { + "method": "PUT", + "uri": "/portals/{portalArn+}", + "code": 200 + }, + "smithy.api#idempotent": {} + } + }, + "com.amazonaws.workspacesweb#UpdatePortalRequest": { + "type": "structure", + "members": { + "portalArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the web portal.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "displayName": { + "target": "com.amazonaws.workspacesweb#DisplayName", + "traits": { + "smithy.api#documentation": "
The name of the web portal. This is not visible to users who log into the web portal.
                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#UpdatePortalResponse": { + "type": "structure", + "members": { + "portal": { + "target": "com.amazonaws.workspacesweb#Portal", + "traits": { + "smithy.api#documentation": "
The web portal.
                                                                      " + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateTrustStore": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#UpdateTrustStoreRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#UpdateTrustStoreResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ServiceQuotaExceededException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Updates the trust store.
                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/trustStores/{trustStoreArn+}", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#UpdateTrustStoreRequest": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the trust store.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "certificatesToAdd": { + "target": "com.amazonaws.workspacesweb#CertificateList", + "traits": { + "smithy.api#documentation": "
A list of CA certificates to add to the trust store.
                                                                      " + } + }, + "certificatesToDelete": { + "target": "com.amazonaws.workspacesweb#CertificateThumbprintList", + "traits": { + "smithy.api#documentation": "
A list of CA certificates to delete from a trust store.
                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "
A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. Idempotency ensures that an API request completes only once. With an idempotent\n request, if the original request completes successfully, subsequent retries with the same\n client token return the result from the original successful request.
\n
If you do not specify a client token, one is automatically generated by the AWS\n SDK.
                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateTrustStoreResponse": { + "type": "structure", + "members": { + "trustStoreArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the trust store.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateUserSettings": { + "type": "operation", + "input": { + "target": "com.amazonaws.workspacesweb#UpdateUserSettingsRequest" + }, + "output": { + "target": "com.amazonaws.workspacesweb#UpdateUserSettingsResponse" + }, + "errors": [ + { + "target": "com.amazonaws.workspacesweb#AccessDeniedException" + }, + { + "target": "com.amazonaws.workspacesweb#InternalServerException" + }, + { + "target": "com.amazonaws.workspacesweb#ResourceNotFoundException" + }, + { + "target": "com.amazonaws.workspacesweb#ThrottlingException" + }, + { + "target": "com.amazonaws.workspacesweb#ValidationException" + } + ], + "traits": { + "smithy.api#documentation": "
Updates the user settings.
                                                                      ", + "smithy.api#http": { + "method": "PATCH", + "uri": "/userSettings/{userSettingsArn+}", + "code": 200 + } + } + }, + "com.amazonaws.workspacesweb#UpdateUserSettingsRequest": { + "type": "structure", + "members": { + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the user settings.
                                                                      ", + "smithy.api#httpLabel": {}, + "smithy.api#required": {} + } + }, + "copyAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can copy text from the streaming session to the local\n device.
                                                                      " + } + }, + "pasteAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can paste text from the local device to the streaming\n session.
                                                                      " + } + }, + "downloadAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can download files from the streaming session to the local\n device.
                                                                      " + } + }, + "uploadAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can upload files from the local device to the streaming\n session.
                                                                      " + } + }, + "printAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can print to the local device.
                                                                      " + } + }, + "clientToken": { + "target": "com.amazonaws.workspacesweb#ClientToken", + "traits": { + "smithy.api#documentation": "
A unique, case-sensitive identifier that you provide to ensure the idempotency of the\n request. Idempotency ensures that an API request completes only once. With an idempotent\n request, if the original request completes successfully, subsequent retries with the same\n client token return the result from the original successful request.
\n
If you do not specify a client token, one is automatically generated by the AWS\n SDK.
                                                                      ", + "smithy.api#idempotencyToken": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UpdateUserSettingsResponse": { + "type": "structure", + "members": { + "userSettings": { + "target": "com.amazonaws.workspacesweb#UserSettings", + "traits": { + "smithy.api#documentation": "
The user settings.
                                                                      ", + "smithy.api#required": {} + } + } + } + }, + "com.amazonaws.workspacesweb#UserSettings": { + "type": "structure", + "members": { + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the user settings.
                                                                      ", + "smithy.api#required": {} + } + }, + "associatedPortalArns": { + "target": "com.amazonaws.workspacesweb#ArnList", + "traits": { + "smithy.api#documentation": "
A list of web portal ARNs that this user settings is associated with.
                                                                      " + } + }, + "copyAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can copy text from the streaming session to the local\n device.
                                                                      " + } + }, + "pasteAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can paste text from the local device to the streaming\n session.
                                                                      " + } + }, + "downloadAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can download files from the streaming session to the local\n device.
                                                                      " + } + }, + "uploadAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can upload files from the local device to the streaming\n session.
                                                                      " + } + }, + "printAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can print to the local device.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
A user settings resource that can be associated with a web portal. Once associated with\n a web portal, user settings control how users can transfer data between a streaming session\n and their local devices.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#UserSettingsList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#UserSettingsSummary" + } + }, + "com.amazonaws.workspacesweb#UserSettingsSummary": { + "type": "structure", + "members": { + "userSettingsArn": { + "target": "com.amazonaws.workspacesweb#ARN", + "traits": { + "smithy.api#documentation": "
The ARN of the user settings.
                                                                      " + } + }, + "copyAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can copy text from the streaming session to the local\n device.
                                                                      " + } + }, + "pasteAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can paste text from the local device to the streaming\n session.
                                                                      " + } + }, + "downloadAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can download files from the streaming session to the local\n device.
                                                                      " + } + }, + "uploadAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can upload files from the local device to the streaming\n session.
                                                                      " + } + }, + "printAllowed": { + "target": "com.amazonaws.workspacesweb#EnabledType", + "traits": { + "smithy.api#documentation": "
Specifies whether the user can print to the local device.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
The summary of user settings.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#ValidationException": { + "type": "structure", + "members": { + "message": { + "target": "com.amazonaws.workspacesweb#ExceptionMessage" + }, + "reason": { + "target": "com.amazonaws.workspacesweb#ValidationExceptionReason", + "traits": { + "smithy.api#documentation": "
The reason the request failed validation.
                                                                      " + } + }, + "fieldList": { + "target": "com.amazonaws.workspacesweb#ValidationExceptionFieldList", + "traits": { + "smithy.api#documentation": "
The field that caused the error.
                                                                      " + } + } + }, + "traits": { + "smithy.api#documentation": "
There is a validation error.
                                                                      ", + "smithy.api#error": "client", + "smithy.api#httpError": 400 + } + }, + "com.amazonaws.workspacesweb#ValidationExceptionField": { + "type": "structure", + "members": { + "name": { + "target": "com.amazonaws.workspacesweb#FieldName", + "traits": { + "smithy.api#documentation": "
The name of the field that failed validation.
                                                                      ", + "smithy.api#required": {} + } + }, + "message": { + "target": "com.amazonaws.workspacesweb#ExceptionMessage", + "traits": { + "smithy.api#documentation": "
The message describing why the field failed validation.
                                                                      ", + "smithy.api#required": {} + } + } + }, + "traits": { + "smithy.api#documentation": "
Information about a field passed inside a request that resulted in an exception.
                                                                      " + } + }, + "com.amazonaws.workspacesweb#ValidationExceptionFieldList": { + "type": "list", + "member": { + "target": "com.amazonaws.workspacesweb#ValidationExceptionField" + } + }, + "com.amazonaws.workspacesweb#ValidationExceptionReason": { + "type": "string", + "traits": { + "smithy.api#enum": [ + { + "value": "unknownOperation", + "name": "UNKNOWN_OPERATION" + }, + { + "value": "cannotParse", + "name": "CANNOT_PARSE" + }, + { + "value": "fieldValidationFailed", + "name": "FIELD_VALIDATION_FAILED" + }, + { + "value": "other", + "name": "OTHER" + } + ] + } + }, + "com.amazonaws.workspacesweb#VpcId": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 1, + "max": 255 + }, + "smithy.api#pattern": "^vpc-[0-9a-z]*$" + } + }, + "com.amazonaws.workspacesweb#keyArn": { + "type": "string", + "traits": { + "smithy.api#length": { + "min": 20, + "max": 2048 + }, + "smithy.api#pattern": "^arn:[\\w+=\\/,.@-]+:kms:[a-zA-Z0-9\\-]*:[a-zA-Z0-9]{1,12}:key\\/[a-zA-Z0-9-]+$" + } + } + } +} diff --git a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json index f385dfc9ed2f..f00910fab036 100644 --- a/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json +++ b/codegen/smithy-aws-typescript-codegen/src/main/resources/software/amazon/smithy/aws/typescript/codegen/endpoints.json @@ -1161,6 +1161,7 @@ "ap-east-1": {}, "ap-northeast-1": {}, "ap-northeast-2": {}, + "ap-northeast-3": {}, "ap-south-1": {}, "ap-southeast-1": {}, "ap-southeast-2": {}, @@ -1850,16 +1851,8 @@ }, "cloudhsm": { "endpoints": { - "ap-northeast-1": {}, - "ap-southeast-1": {}, - "ap-southeast-2": {}, - "ca-central-1": {}, - "eu-central-1": {}, "eu-west-1": {}, - "us-east-1": {}, - "us-east-2": {}, - "us-west-1": {}, - "us-west-2": {} + "us-east-1": {} } }, "cloudhsmv2": { @@ -5112,6 +5105,37 @@ } } }, + "evidently": { + "endpoints": { + "ap-northeast-1": { + "hostname": "evidently.ap-northeast-1.amazonaws.com" + }, + "ap-southeast-1": { + "hostname": "evidently.ap-southeast-1.amazonaws.com" + }, + "ap-southeast-2": { + "hostname": "evidently.ap-southeast-2.amazonaws.com" + }, + "eu-central-1": { + "hostname": "evidently.eu-central-1.amazonaws.com" + }, + "eu-north-1": { + "hostname": "evidently.eu-north-1.amazonaws.com" + }, + "eu-west-1": { + "hostname": "evidently.eu-west-1.amazonaws.com" + }, + "us-east-1": { + "hostname": "evidently.us-east-1.amazonaws.com" + }, + "us-east-2": { + "hostname": "evidently.us-east-2.amazonaws.com" + }, + "us-west-2": { + "hostname": "evidently.us-west-2.amazonaws.com" + } + } + }, "finspace": { "endpoints": { "ca-central-1": {}, @@ -6449,6 +6473,29 @@ } } }, + "inspector2": { + "endpoints": { + "ap-east-1": {}, + "ap-northeast-1": {}, + "ap-northeast-2": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-south-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "me-south-1": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, "iot": { "defaults": { "credentialScope": { @@ -9325,6 +9372,31 @@ } } }, + "rbin": { + "endpoints": { + "af-south-1": {}, + "ap-east-1": {}, + "ap-northeast-1": {}, + "ap-northeast-2": {}, + 
"ap-northeast-3": {}, + "ap-south-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "ca-central-1": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-south-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "eu-west-3": {}, + "me-south-1": {}, + "sa-east-1": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-1": {}, + "us-west-2": {} + } + }, "rds": { "endpoints": { "af-south-1": {}, @@ -9953,6 +10025,20 @@ "us-west-2": {} } }, + "rum": { + "endpoints": { + "ap-northeast-1": {}, + "ap-southeast-1": {}, + "ap-southeast-2": {}, + "eu-central-1": {}, + "eu-north-1": {}, + "eu-west-1": {}, + "eu-west-2": {}, + "us-east-1": {}, + "us-east-2": {}, + "us-west-2": {} + } + }, "runtime-v2-lex": { "endpoints": { "af-south-1": {},